You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by da...@apache.org on 2018/09/28 02:18:38 UTC

[01/29] lucene-solr:jira/http2: SOLR-12792: extract test data into separate files in autoscaling tests

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/http2 1664918f9 -> be332a26b


SOLR-12792: extract test data into separate files in autoscaling tests


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6adeb5bc
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6adeb5bc
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6adeb5bc

Branch: refs/heads/jira/http2
Commit: 6adeb5bc44040219962f00ddbf3d956e16149b62
Parents: d7e97fb
Author: Noble Paul <no...@apache.org>
Authored: Fri Sep 21 12:55:41 2018 +1000
Committer: Noble Paul <no...@apache.org>
Committed: Fri Sep 21 12:55:41 2018 +1000

----------------------------------------------------------------------
 .../java/org/apache/solr/common/util/Utils.java |   4 +-
 .../testScheduledTriggerFailure.json            |  52 ++
 .../solrj/solr/autoscaling/testSortError.json   | 225 ++++++++
 .../autoscaling/testSysPropSuggestions.json     | 119 ++++
 .../autoscaling/testUtilizeNodeFailure.json     |  69 +++
 .../autoscaling/testUtilizeNodeFailure2.json    |  66 +++
 .../solrj/cloud/autoscaling/TestPolicy.java     | 541 +------------------
 .../solrj/cloud/autoscaling/TestPolicy2.java    | 166 +-----
 8 files changed, 548 insertions(+), 694 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6adeb5bc/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/Utils.java b/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
index b389632..9e72607 100644
--- a/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
@@ -232,7 +232,7 @@ public class Utils {
 
   public static Object fromJSON(InputStream is){
     try {
-      return new ObjectBuilder(getJSONParser((new InputStreamReader(is, StandardCharsets.UTF_8)))).getObject();
+      return new ObjectBuilder(getJSONParser((new InputStreamReader(is, StandardCharsets.UTF_8)))).getVal();
     } catch (IOException e) {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Parse error", e);
     }
@@ -260,7 +260,7 @@ public class Utils {
 
   public static Object fromJSONString(String json)  {
     try {
-      return new ObjectBuilder(getJSONParser(new StringReader(json))).getObject();
+      return new ObjectBuilder(getJSONParser(new StringReader(json))).getVal();
     } catch (IOException e) {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Parse error", e);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6adeb5bc/solr/solrj/src/test-files/solrj/solr/autoscaling/testScheduledTriggerFailure.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testScheduledTriggerFailure.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testScheduledTriggerFailure.json
new file mode 100644
index 0000000..9347494
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testScheduledTriggerFailure.json
@@ -0,0 +1,52 @@
+{
+  "liveNodes":["127.0.0.1:49221_solr",
+    "127.0.0.1:49210_solr"],
+  "suggester":{
+    "action":"MOVEREPLICA",
+    "hints":{}},
+  "replicaInfo":{
+    "127.0.0.1:49210_solr":{"testScheduledTrigger":{"shard1":[{"core_node3":{
+      "base_url":"http://127.0.0.1:49210/solr",
+      "node_name":"127.0.0.1:49210_solr",
+      "core":"testScheduledTrigger_shard1_replica_n1",
+      "state":"active",
+      "type":"NRT",
+      "INDEX.sizeInBytes":6.426125764846802E-8,
+      "shard":"shard1",
+      "collection":"testScheduledTrigger"}},
+      {"core_node6":{
+        "base_url":"http://127.0.0.1:49210/solr",
+        "node_name":"127.0.0.1:49210_solr",
+        "core":"testScheduledTrigger_shard1_replica_n4",
+        "state":"active",
+        "type":"NRT",
+        "INDEX.sizeInBytes":6.426125764846802E-8,
+        "shard":"shard1",
+        "collection":"testScheduledTrigger"}}]}},
+    "127.0.0.1:49221_solr":{"testScheduledTrigger":{"shard1":[{"core_node5":{
+      "core":"testScheduledTrigger_shard1_replica_n2",
+      "leader":"true",
+      "INDEX.sizeInBytes":6.426125764846802E-8,
+      "base_url":"http://127.0.0.1:49221/solr",
+      "node_name":"127.0.0.1:49221_solr",
+      "state":"active",
+      "type":"NRT",
+      "shard":"shard1",
+      "collection":"testScheduledTrigger"}}]}}},
+  "nodeValues":{
+    "127.0.0.1:49210_solr":{
+      "node":"127.0.0.1:49210_solr",
+      "cores":2,
+      "freedisk":197.39717864990234},
+    "127.0.0.1:49221_solr":{
+      "node":"127.0.0.1:49221_solr",
+      "cores":1,
+      "freedisk":197.39717864990234}},
+  "autoscalingJson":{
+    "cluster-preferences":[{
+      "minimize":"cores",
+      "precision":1},
+      {"maximize":"freedisk"}],
+    "cluster-policy":[{
+      "cores":"<3",
+      "node":"#EACH"}]}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6adeb5bc/solr/solrj/src/test-files/solrj/solr/autoscaling/testSortError.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testSortError.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testSortError.json
new file mode 100644
index 0000000..434e627
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testSortError.json
@@ -0,0 +1,225 @@
+[{"node":"solr-01:8983_solr",
+  "replicas":{},
+  "isLive":true,
+  "attributes":[{"cores":2},
+    {"freedisk":1734.5261459350586},
+    {"sysLoadAvg":35.0},
+    {"node":"solr-01:8983_solr"}]},
+  {
+    "node":"solr-07:8983_solr",
+    "replicas":{},
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1721.5669250488281},
+      {"sysLoadAvg":10.0},
+      {"node":"solr-07:8983_solr"}]},
+  {
+    "node":"solr-08:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1764.9518203735352},
+      {"sysLoadAvg":330.0},
+      {"node":"solr-08:8983_solr"}]},
+  {
+    "node":"solr-25:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1779.7792778015137},
+      {"sysLoadAvg":304.0},
+      {"node":"solr-25:8983_solr"}]},
+  {
+    "node":"solr-15:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1697.5930519104004},
+      {"sysLoadAvg":277.0},
+      {"node":"solr-15:8983_solr"}]},
+  {
+    "node":"solr-13:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":2},
+      {"freedisk":1755.1909484863281},
+      {"sysLoadAvg":265.0},
+      {"node":"solr-13:8983_solr"}]},
+  {
+    "node":"solr-14:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1757.6035423278809},
+      {"sysLoadAvg":61.0},
+      {"node":"solr-14:8983_solr"}]},
+  {
+    "node":"solr-16:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1746.081386566162},
+      {"sysLoadAvg":260.0},
+      {"node":"solr-16:8983_solr"}]},
+  {
+    "node":"solr-04:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":2},
+      {"freedisk":1708.7230529785156},
+      {"sysLoadAvg":216.0},
+      {"node":"solr-04:8983_solr"}]},
+  {
+    "node":"solr-06:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1688.3182678222656},
+      {"sysLoadAvg":385.0},
+      {"node":"solr-06:8983_solr"}]},
+  {
+    "node":"solr-02:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":6},
+      {"freedisk":1778.226963043213},
+      {"sysLoadAvg":369.0},
+      {"node":"solr-02:8983_solr"}]},
+  {
+    "node":"solr-05:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1741.9401931762695},
+      {"sysLoadAvg":354.0},
+      {"node":"solr-05:8983_solr"}]},
+  {
+    "node":"solr-23:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1718.854579925537},
+      {"sysLoadAvg":329.0},
+      {"node":"solr-23:8983_solr"}]},
+  {
+    "node":"solr-24:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1733.6669311523438},
+      {"sysLoadAvg":327.0},
+      {"node":"solr-24:8983_solr"}]},
+  {
+    "node":"solr-09:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1714.6191711425781},
+      {"sysLoadAvg":278.0},
+      {"node":"solr-09:8983_solr"}]},
+  {
+    "node":"solr-10:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1755.3038482666016},
+      {"sysLoadAvg":266.0},
+      {"node":"solr-10:8983_solr"}]},
+  {
+    "node":"solr-28:8983_solr",
+    "isLive":false,
+    "attributes":[{"cores":3},
+      {"freedisk":1691.3830909729004},
+      {"sysLoadAvg":261.0},
+      {"node":"solr-28:8983_solr"}]},
+  {
+    "node":"solr-29:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":2},
+      {"freedisk":1706.797966003418},
+      {"sysLoadAvg":252.99999999999997},
+      {"node":"solr-29:8983_solr"}]},
+  {
+    "node":"solr-32:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1762.432300567627},
+      {"sysLoadAvg":221.0},
+      {"node":"solr-32:8983_solr"}]},
+  {
+    "node":"solr-21:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1760.9801979064941},
+      {"sysLoadAvg":213.0},
+      {"node":"solr-21:8983_solr"}]},
+  {
+    "node":"solr-22:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1780.5297241210938},
+      {"sysLoadAvg":209.0},
+      {"node":"solr-22:8983_solr"}]},
+  {
+    "node":"solr-31:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1700.1481628417969},
+      {"sysLoadAvg":211.0},
+      {"node":"solr-31:8983_solr"}]},
+  {
+    "node":"solr-33:8983_solr",
+    "isLive":false,
+    "attributes":[{"cores":3},
+      {"freedisk":1748.1132926940918},
+      {"sysLoadAvg":199.0},
+      {"node":"solr-33:8983_solr"}]},
+  {
+    "node":"solr-36:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1776.197639465332},
+      {"sysLoadAvg":193.0},
+      {"node":"solr-36:8983_solr"}]},
+  {
+    "node":"solr-35:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1746.7729606628418},
+      {"sysLoadAvg":191.0},
+      {"node":"solr-35:8983_solr"}]},
+  {
+    "node":"solr-12:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1713.287540435791},
+      {"sysLoadAvg":175.0},
+      {"node":"solr-12:8983_solr"}]},
+  {
+    "node":"solr-11:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1736.784511566162},
+      {"sysLoadAvg":169.0},
+      {"node":"solr-11:8983_solr"}]},
+  {
+    "node":"solr-35:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1766.9416885375977},
+      {"sysLoadAvg":155.0},
+      {"node":"solr-35:8983_solr"}]},
+  {
+    "node":"solr-17:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":3},
+      {"freedisk":1764.3425407409668},
+      {"sysLoadAvg":139.0},
+      {"node":"solr-17:8983_solr"}]},
+  {
+    "node":"solr-18:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":2},
+      {"freedisk":1757.0613975524902},
+      {"sysLoadAvg":132.0},
+      {"node":"solr-18:8983_solr"}]},
+  {
+    "node":"solr-20:8983_solr",
+    "isLive":false,
+    "attributes":[{"cores":3},
+      {"freedisk":1747.4205322265625},
+      {"sysLoadAvg":126.0},
+      {"node":"solr-20:8983_solr"}]},
+  {
+    "node":"solr-27:8983_solr",
+    "isLive":true,
+    "attributes":[{"cores":4},
+      {"freedisk":1721.0442085266113},
+      {"sysLoadAvg":118.0},
+      {"node":"solr-27:8983_solr"}]}]
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6adeb5bc/solr/solrj/src/test-files/solrj/solr/autoscaling/testSysPropSuggestions.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testSysPropSuggestions.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testSysPropSuggestions.json
new file mode 100644
index 0000000..aabcb62
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testSysPropSuggestions.json
@@ -0,0 +1,119 @@
+{"diagnostics":{
+  "sortedNodes":[{
+    "node":"127.0.0.1:63191_solr",
+    "isLive":true,
+    "cores":3.0,
+    "sysprop.zone":"east",
+    "freedisk":1727.1459312438965,
+    "heapUsage":24.97510064011647,
+    "sysLoadAvg":272.75390625,
+    "totaldisk":1037.938980102539,
+    "replicas":{"zonesTest":{"shard1":[{"core_node5":{
+      "core":"zonesTest_shard1_replica_n2",
+      "leader":"true",
+      "base_url":"https://127.0.0.1:63191/solr",
+      "node_name":"127.0.0.1:63191_solr",
+      "state":"active",
+      "type":"NRT",
+      "force_set_state":"false",
+      "INDEX.sizeInGB":6.426125764846802E-8,
+      "shard":"shard1",
+      "collection":"zonesTest"}},
+      {"core_node7":{
+        "core":"zonesTest_shard1_replica_n4",
+        "base_url":"https://127.0.0.1:63191/solr",
+        "node_name":"127.0.0.1:63191_solr",
+        "state":"active",
+        "type":"NRT",
+        "force_set_state":"false",
+        "INDEX.sizeInGB":6.426125764846802E-8,
+        "shard":"shard1",
+        "collection":"zonesTest"}},
+      {"core_node12":{
+        "core":"zonesTest_shard1_replica_n10",
+        "base_url":"https://127.0.0.1:63191/solr",
+        "node_name":"127.0.0.1:63191_solr",
+        "state":"active",
+        "type":"NRT",
+        "force_set_state":"false",
+        "INDEX.sizeInGB":6.426125764846802E-8,
+        "shard":"shard1",
+        "collection":"zonesTest"}}]}}},
+    {
+      "node":"127.0.0.1:63192_solr",
+      "isLive":true,
+      "cores":3.0,
+      "sysprop.zone":"east",
+      "freedisk":1727.1459312438965,
+      "heapUsage":24.98878807983566,
+      "sysLoadAvg":272.75390625,
+      "totaldisk":1037.938980102539,
+      "replicas":{"zonesTest":{"shard2":[{"core_node3":{
+        "core":"zonesTest_shard1_replica_n1",
+        "base_url":"https://127.0.0.1:63192/solr",
+        "node_name":"127.0.0.1:63192_solr",
+        "state":"active",
+        "type":"NRT",
+        "force_set_state":"false",
+        "INDEX.sizeInGB":6.426125764846802E-8,
+        "shard":"shard2",
+        "collection":"zonesTest"}},
+        {"core_node9":{
+          "core":"zonesTest_shard1_replica_n6",
+          "base_url":"https://127.0.0.1:63192/solr",
+          "node_name":"127.0.0.1:63192_solr",
+          "state":"active",
+          "type":"NRT",
+          "force_set_state":"false",
+          "INDEX.sizeInGB":6.426125764846802E-8,
+          "shard":"shard2",
+          "collection":"zonesTest"}},
+        {"core_node11":{
+          "core":"zonesTest_shard1_replica_n8",
+          "base_url":"https://127.0.0.1:63192/solr",
+          "node_name":"127.0.0.1:63192_solr",
+          "state":"active",
+          "type":"NRT",
+          "force_set_state":"false",
+          "INDEX.sizeInGB":6.426125764846802E-8,
+          "shard":"shard2",
+          "collection":"zonesTest"}}]}}},
+    {
+      "node":"127.0.0.1:63219_solr",
+      "isLive":true,
+      "cores":0.0,
+      "sysprop.zone":"west",
+      "freedisk":1768.6174201965332,
+      "heapUsage":24.98878807983566,
+      "sysLoadAvg":272.75390625,
+      "totaldisk":1037.938980102539,
+      "replicas":{}},
+    {
+      "node":"127.0.0.1:63229_solr",
+      "isLive":true,
+      "cores":0.0,
+      "sysprop.zone":"west",
+      "freedisk":1768.6174201965332,
+      "heapUsage":24.98878807983566,
+      "sysLoadAvg":272.75390625,
+      "totaldisk":1037.938980102539,
+      "replicas":{}}],
+  "liveNodes":["127.0.0.1:63191_solr",
+    "127.0.0.1:63192_solr",
+    "127.0.0.1:63219_solr",
+    "127.0.0.1:63229_solr"],
+  "config":{
+    "cluster-preferences":[{
+      "minimize":"cores",
+      "precision":1},
+      {
+        "maximize":"freedisk",
+        "precision":100},
+      {
+        "minimize":"sysLoadAvg",
+        "precision":10}],
+    "cluster-policy":[{
+      "replica":"<3",
+      "shard":"#EACH",
+      "sysprop.zone":["east",
+        "west"]}]}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6adeb5bc/solr/solrj/src/test-files/solrj/solr/autoscaling/testUtilizeNodeFailure.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testUtilizeNodeFailure.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testUtilizeNodeFailure.json
new file mode 100644
index 0000000..350957c
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testUtilizeNodeFailure.json
@@ -0,0 +1,69 @@
+{
+  "liveNodes":["127.0.0.1:50417_solr",
+    "127.0.0.1:50418_solr",
+    "127.0.0.1:50419_solr",
+    "127.0.0.1:50420_solr",
+    "127.0.0.1:50443_solr"],
+  "suggester":{
+    "action":"MOVEREPLICA",
+    "hints":{"TARGET_NODE":["127.0.0.1:50443_solr"]}},
+  "replicaInfo":{
+    "127.0.0.1:50418_solr":{"utilizenodecoll":{"shard2":[{"core_node7":{
+      "core":"utilizenodecoll_shard2_replica_n4",
+      "leader":"true",
+      "INDEX.sizeInBytes":6.426125764846802E-8,
+      "base_url":"http://127.0.0.1:50418/solr",
+      "node_name":"127.0.0.1:50418_solr",
+      "state":"active",
+      "type":"NRT",
+      "shard":"shard2",
+      "collection":"utilizenodecoll"}}]}},
+    "127.0.0.1:50417_solr":{"utilizenodecoll":{"shard2":[{"core_node8":{
+      "base_url":"http://127.0.0.1:50417/solr",
+      "node_name":"127.0.0.1:50417_solr",
+      "core":"utilizenodecoll_shard2_replica_n6",
+      "state":"active",
+      "type":"NRT",
+      "INDEX.sizeInBytes":6.426125764846802E-8,
+      "shard":"shard2",
+      "collection":"utilizenodecoll"}}]}},
+    "127.0.0.1:50419_solr":{"utilizenodecoll":{"shard1":[{"core_node5":{
+      "base_url":"http://127.0.0.1:50419/solr",
+      "node_name":"127.0.0.1:50419_solr",
+      "core":"utilizenodecoll_shard1_replica_n2",
+      "state":"active",
+      "type":"NRT",
+      "INDEX.sizeInBytes":6.426125764846802E-8,
+      "shard":"shard1",
+      "collection":"utilizenodecoll"}}]}},
+    "127.0.0.1:50420_solr":{"utilizenodecoll":{"shard1":[{"core_node3":{
+      "core":"utilizenodecoll_shard1_replica_n1",
+      "leader":"true",
+      "INDEX.sizeInBytes":6.426125764846802E-8,
+      "base_url":"http://127.0.0.1:50420/solr",
+      "node_name":"127.0.0.1:50420_solr",
+      "state":"active",
+      "type":"NRT",
+      "shard":"shard1",
+      "collection":"utilizenodecoll"}}]}},
+    "127.0.0.1:50443_solr":{}},
+  "nodeValues":{
+    "127.0.0.1:50418_solr":{
+      "cores":1,
+      "freedisk":187.70782089233398},
+    "127.0.0.1:50417_solr":{
+      "cores":1,
+      "freedisk":187.70782089233398},
+    "127.0.0.1:50419_solr":{
+      "cores":1,
+      "freedisk":187.70782089233398},
+    "127.0.0.1:50420_solr":{
+      "cores":1,
+      "freedisk":187.70782089233398},
+    "127.0.0.1:50443_solr":{
+      "cores":0,
+      "freedisk":187.70782089233398}},
+  "autoscalingJson":{"cluster-preferences":[{
+    "minimize":"cores",
+    "precision":1},
+    {"maximize":"freedisk"}]}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6adeb5bc/solr/solrj/src/test-files/solrj/solr/autoscaling/testUtilizeNodeFailure2.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testUtilizeNodeFailure2.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testUtilizeNodeFailure2.json
new file mode 100644
index 0000000..2d6c384
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testUtilizeNodeFailure2.json
@@ -0,0 +1,66 @@
+{
+  "liveNodes":["127.0.0.1:51075_solr",
+    "127.0.0.1:51076_solr",
+    "127.0.0.1:51077_solr",
+    "127.0.0.1:51097_solr"],
+  "suggester":{
+    "action":"MOVEREPLICA",
+    "hints":{"TARGET_NODE":["127.0.0.1:51097_solr"]}},
+  "replicaInfo":{
+    "127.0.0.1:51076_solr":{"utilizenodecoll":{"shard1":[{"core_node5":{
+      "base_url":"https://127.0.0.1:51076/solr",
+      "node_name":"127.0.0.1:51076_solr",
+      "core":"utilizenodecoll_shard1_replica_n2",
+      "state":"active",
+      "type":"NRT",
+      "INDEX.sizeInBytes":6.426125764846802E-8,
+      "shard":"shard1",
+      "collection":"utilizenodecoll"}}]}},
+    "127.0.0.1:51077_solr":{"utilizenodecoll":{
+      "shard2":[{"core_node8":{
+        "base_url":"https://127.0.0.1:51077/solr",
+        "node_name":"127.0.0.1:51077_solr",
+        "core":"utilizenodecoll_shard2_replica_n6",
+        "state":"active",
+        "type":"NRT",
+        "INDEX.sizeInBytes":6.426125764846802E-8,
+        "shard":"shard2",
+        "collection":"utilizenodecoll"}}],
+      "shard1":[{"core_node3":{
+        "core":"utilizenodecoll_shard1_replica_n1",
+        "leader":"true",
+        "INDEX.sizeInBytes":6.426125764846802E-8,
+        "base_url":"https://127.0.0.1:51077/solr",
+        "node_name":"127.0.0.1:51077_solr",
+        "state":"active",
+        "type":"NRT",
+        "shard":"shard1",
+        "collection":"utilizenodecoll"}}]}},
+    "127.0.0.1:51097_solr":{},
+    "127.0.0.1:51075_solr":{"utilizenodecoll":{"shard2":[{"core_node7":{
+      "core":"utilizenodecoll_shard2_replica_n4",
+      "leader":"true",
+      "INDEX.sizeInBytes":6.426125764846802E-8,
+      "base_url":"https://127.0.0.1:51075/solr",
+      "node_name":"127.0.0.1:51075_solr",
+      "state":"active",
+      "type":"NRT",
+      "shard":"shard2",
+      "collection":"utilizenodecoll"}}]}}},
+  "nodeValues":{
+    "127.0.0.1:51076_solr":{
+      "cores":1,
+      "freedisk":188.7262191772461},
+    "127.0.0.1:51077_solr":{
+      "cores":2,
+      "freedisk":188.7262191772461},
+    "127.0.0.1:51097_solr":{
+      "cores":0,
+      "freedisk":188.7262191772461},
+    "127.0.0.1:51075_solr":{
+      "cores":1,
+      "freedisk":188.7262191772461}},
+  "autoscalingJson":{"cluster-preferences":[{
+    "minimize":"cores",
+    "precision":1},
+    {"maximize":"freedisk"}]}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6adeb5bc/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
index a48141e..4a16259 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
@@ -2889,97 +2889,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testScheduledTriggerFailure() throws Exception {
-    String state = "{" +
-        "  'liveNodes': [" +
-        "    '127.0.0.1:49221_solr'," +
-        "    '127.0.0.1:49210_solr'" +
-        "  ]," +
-        "  'suggester': {" +
-        "    'action': 'MOVEREPLICA'," +
-        "    'hints': {}" +
-        "  }," +
-        "  'replicaInfo': {" +
-        "    '127.0.0.1:49210_solr': {" +
-        "      'testScheduledTrigger': {" +
-        "        'shard1': [" +
-        "          {" +
-        "            'core_node3': {" +
-        "              'base_url': 'http://127.0.0.1:49210/solr'," +
-        "              'node_name': '127.0.0.1:49210_solr'," +
-        "              'core': 'testScheduledTrigger_shard1_replica_n1'," +
-        "              'state': 'active'," +
-        "              'type': 'NRT'," +
-        "              'INDEX.sizeInBytes': 6.426125764846802E-8," +
-        "              'shard': 'shard1'," +
-        "              'collection': 'testScheduledTrigger'" +
-        "            }" +
-        "          }," +
-        "          {" +
-        "            'core_node6': {" +
-        "              'base_url': 'http://127.0.0.1:49210/solr'," +
-        "              'node_name': '127.0.0.1:49210_solr'," +
-        "              'core': 'testScheduledTrigger_shard1_replica_n4'," +
-        "              'state': 'active'," +
-        "              'type': 'NRT'," +
-        "              'INDEX.sizeInBytes': 6.426125764846802E-8," +
-        "              'shard': 'shard1'," +
-        "              'collection': 'testScheduledTrigger'" +
-        "            }" +
-        "          }" +
-        "        ]" +
-        "      }" +
-        "    }," +
-        "    '127.0.0.1:49221_solr': {" +
-        "      'testScheduledTrigger': {" +
-        "        'shard1': [" +
-        "          {" +
-        "            'core_node5': {" +
-        "              'core': 'testScheduledTrigger_shard1_replica_n2'," +
-        "              'leader': 'true'," +
-        "              'INDEX.sizeInBytes': 6.426125764846802E-8," +
-        "              'base_url': 'http://127.0.0.1:49221/solr'," +
-        "              'node_name': '127.0.0.1:49221_solr'," +
-        "              'state': 'active'," +
-        "              'type': 'NRT'," +
-        "              'shard': 'shard1'," +
-        "              'collection': 'testScheduledTrigger'" +
-        "            }" +
-        "          }" +
-        "        ]" +
-        "      }" +
-        "    }" +
-        "  }," +
-        "  'nodeValues': {" +
-        "    '127.0.0.1:49210_solr': {" +
-        "      'node': '127.0.0.1:49210_solr'," +
-        "      'cores': 2," +
-        "      'freedisk': 197.39717864990234" +
-        "    }," +
-        "    '127.0.0.1:49221_solr': {" +
-        "      'node': '127.0.0.1:49221_solr'," +
-        "      'cores': 1," +
-        "      'freedisk': 197.39717864990234" +
-        "    }" +
-        "  }," +
-        "  'autoscalingJson': {" +
-        "    'cluster-preferences': [" +
-        "      {" +
-        "        'minimize': 'cores'," +
-        "        'precision': 1" +
-        "      }," +
-        "      {" +
-        "        'maximize': 'freedisk'" +
-        "      }" +
-        "    ]," +
-        "    'cluster-policy': [" +
-        "      {" +
-        "        'cores': '<3'," +
-        "        'node': '#EACH'" +
-        "      }" +
-        "    ]" +
-        "  }" +
-        "}";
-    Map jsonObj = (Map) Utils.fromJSONString(state);
+    Map jsonObj = (Map) TestPolicy2.loadFromResource("testScheduledTriggerFailure.json");
     SolrCloudManager cloudManager = createCloudManager(jsonObj);
     Suggester suggester = createSuggester(cloudManager, jsonObj, null);
     int count = 0;
@@ -2995,118 +2905,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testUtilizeNodeFailure() throws Exception {
-    String state = "{'liveNodes': ['127.0.0.1:50417_solr', '127.0.0.1:50418_solr', '127.0.0.1:50419_solr', '127.0.0.1:50420_solr', '127.0.0.1:50443_solr']," +
-        "  'suggester': {" +
-        "    'action': 'MOVEREPLICA'," +
-        "    'hints': {'TARGET_NODE': ['127.0.0.1:50443_solr']}" +
-        "  }," +
-        "  'replicaInfo': {" +
-        "    '127.0.0.1:50418_solr': {" +
-        "      'utilizenodecoll': {" +
-        "        'shard2': [" +
-        "          {" +
-        "            'core_node7': {" +
-        "              'core': 'utilizenodecoll_shard2_replica_n4'," +
-        "              'leader': 'true'," +
-        "              'INDEX.sizeInBytes': 6.426125764846802E-8," +
-        "              'base_url': 'http://127.0.0.1:50418/solr'," +
-        "              'node_name': '127.0.0.1:50418_solr'," +
-        "              'state': 'active'," +
-        "              'type': 'NRT'," +
-        "              'shard': 'shard2'," +
-        "              'collection': 'utilizenodecoll'" +
-        "            }" +
-        "          }" +
-        "        ]" +
-        "      }" +
-        "    }," +
-        "    '127.0.0.1:50417_solr': {" +
-        "      'utilizenodecoll': {" +
-        "        'shard2': [" +
-        "          {" +
-        "            'core_node8': {" +
-        "              'base_url': 'http://127.0.0.1:50417/solr'," +
-        "              'node_name': '127.0.0.1:50417_solr'," +
-        "              'core': 'utilizenodecoll_shard2_replica_n6'," +
-        "              'state': 'active'," +
-        "              'type': 'NRT'," +
-        "              'INDEX.sizeInBytes': 6.426125764846802E-8," +
-        "              'shard': 'shard2'," +
-        "              'collection': 'utilizenodecoll'" +
-        "            }" +
-        "          }" +
-        "        ]" +
-        "      }" +
-        "    }," +
-        "    '127.0.0.1:50419_solr': {" +
-        "      'utilizenodecoll': {" +
-        "        'shard1': [" +
-        "          {" +
-        "            'core_node5': {" +
-        "              'base_url': 'http://127.0.0.1:50419/solr'," +
-        "              'node_name': '127.0.0.1:50419_solr'," +
-        "              'core': 'utilizenodecoll_shard1_replica_n2'," +
-        "              'state': 'active'," +
-        "              'type': 'NRT'," +
-        "              'INDEX.sizeInBytes': 6.426125764846802E-8," +
-        "              'shard': 'shard1'," +
-        "              'collection': 'utilizenodecoll'" +
-        "            }" +
-        "          }" +
-        "        ]" +
-        "      }" +
-        "    }," +
-        "    '127.0.0.1:50420_solr': {" +
-        "      'utilizenodecoll': {" +
-        "        'shard1': [" +
-        "          {" +
-        "            'core_node3': {" +
-        "              'core': 'utilizenodecoll_shard1_replica_n1'," +
-        "              'leader': 'true'," +
-        "              'INDEX.sizeInBytes': 6.426125764846802E-8," +
-        "              'base_url': 'http://127.0.0.1:50420/solr'," +
-        "              'node_name': '127.0.0.1:50420_solr'," +
-        "              'state': 'active'," +
-        "              'type': 'NRT'," +
-        "              'shard': 'shard1'," +
-        "              'collection': 'utilizenodecoll'" +
-        "            }" +
-        "          }" +
-        "        ]" +
-        "      }" +
-        "    }," +
-        "    '127.0.0.1:50443_solr': {}" +
-        "  }," +
-        "  'nodeValues': {" +
-        "    '127.0.0.1:50418_solr': {" +
-        "      'cores': 1," +
-        "      'freedisk': 187.70782089233398" +
-        "    }," +
-        "    '127.0.0.1:50417_solr': {" +
-        "      'cores': 1," +
-        "      'freedisk': 187.70782089233398" +
-        "    }," +
-        "    '127.0.0.1:50419_solr': {" +
-        "      'cores': 1," +
-        "      'freedisk': 187.70782089233398" +
-        "    }," +
-        "    '127.0.0.1:50420_solr': {" +
-        "      'cores': 1," +
-        "      'freedisk': 187.70782089233398" +
-        "    }," +
-        "    '127.0.0.1:50443_solr': {" +
-        "      'cores': 0," +
-        "      'freedisk': 187.70782089233398" +
-        "    }" +
-        "  }," +
-        "  'autoscalingJson': {" +
-        "    'cluster-preferences': [" +
-        "      {'minimize': 'cores', 'precision': 1}," +
-        "      {'maximize': 'freedisk'}" +
-        "    ]" +
-        "  }" +
-        "}";
-    Map jsonObj = (Map) Utils.fromJSONString(state);
+    Map jsonObj = (Map) TestPolicy2.loadFromResource("testUtilizeNodeFailure.json"); //(Map) Utils.fromJSONString(state);
     SolrCloudManager cloudManager = createCloudManager(jsonObj);
     Suggester suggester = createSuggester(cloudManager, jsonObj, null);
     int count = 0;
@@ -3122,76 +2921,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testUtilizeNodeFailure2() throws Exception {
-    String state = "{  'liveNodes':[" +
-        "  '127.0.0.1:51075_solr'," +
-        "  '127.0.0.1:51076_solr'," +
-        "  '127.0.0.1:51077_solr'," +
-        "  '127.0.0.1:51097_solr']," +
-        "  'suggester':{" +
-        "    'action':'MOVEREPLICA'," +
-        "    'hints':{'TARGET_NODE':['127.0.0.1:51097_solr']}}," +
-        "  'replicaInfo':{" +
-        "    '127.0.0.1:51076_solr':{'utilizenodecoll':{'shard1':[{'core_node5':{" +
-        "      'base_url':'https://127.0.0.1:51076/solr'," +
-        "      'node_name':'127.0.0.1:51076_solr'," +
-        "      'core':'utilizenodecoll_shard1_replica_n2'," +
-        "      'state':'active'," +
-        "      'type':'NRT'," +
-        "      'INDEX.sizeInBytes':6.426125764846802E-8," +
-        "      'shard':'shard1'," +
-        "      'collection':'utilizenodecoll'}}]}}," +
-        "    '127.0.0.1:51077_solr':{'utilizenodecoll':{" +
-        "      'shard2':[{'core_node8':{" +
-        "        'base_url':'https://127.0.0.1:51077/solr'," +
-        "        'node_name':'127.0.0.1:51077_solr'," +
-        "        'core':'utilizenodecoll_shard2_replica_n6'," +
-        "        'state':'active'," +
-        "        'type':'NRT'," +
-        "        'INDEX.sizeInBytes':6.426125764846802E-8," +
-        "        'shard':'shard2'," +
-        "        'collection':'utilizenodecoll'}}]," +
-        "      'shard1':[{'core_node3':{" +
-        "        'core':'utilizenodecoll_shard1_replica_n1'," +
-        "        'leader':'true'," +
-        "        'INDEX.sizeInBytes':6.426125764846802E-8," +
-        "        'base_url':'https://127.0.0.1:51077/solr'," +
-        "        'node_name':'127.0.0.1:51077_solr'," +
-        "        'state':'active'," +
-        "        'type':'NRT'," +
-        "        'shard':'shard1'," +
-        "        'collection':'utilizenodecoll'}}]}}," +
-        "    '127.0.0.1:51097_solr':{}," +
-        "    '127.0.0.1:51075_solr':{'utilizenodecoll':{'shard2':[{'core_node7':{" +
-        "      'core':'utilizenodecoll_shard2_replica_n4'," +
-        "      'leader':'true'," +
-        "      'INDEX.sizeInBytes':6.426125764846802E-8," +
-        "      'base_url':'https://127.0.0.1:51075/solr'," +
-        "      'node_name':'127.0.0.1:51075_solr'," +
-        "      'state':'active'," +
-        "      'type':'NRT'," +
-        "      'shard':'shard2'," +
-        "      'collection':'utilizenodecoll'}}]}}}," +
-        "  'nodeValues':{" +
-        "    '127.0.0.1:51076_solr':{" +
-        "      'cores':1," +
-        "      'freedisk':188.7262191772461}," +
-        "    '127.0.0.1:51077_solr':{" +
-        "      'cores':2," +
-        "      'freedisk':188.7262191772461}," +
-        "    '127.0.0.1:51097_solr':{" +
-        "      'cores':0," +
-        "      'freedisk':188.7262191772461}," +
-        "    '127.0.0.1:51075_solr':{" +
-        "      'cores':1," +
-        "      'freedisk':188.7262191772461}}," +
-        "  'autoscalingJson':{" +
-        "    'cluster-preferences':[" +
-        "      {" +
-        "        'minimize':'cores'," +
-        "        'precision':1}," +
-        "      {'maximize':'freedisk'}]" +
-        "    }}";
-    Map jsonObj = (Map) Utils.fromJSONString(state);
+    Map jsonObj = (Map) TestPolicy2.loadFromResource("testUtilizeNodeFailure2.json");
     SolrCloudManager cloudManager = createCloudManager(jsonObj);
     Suggester suggester = createSuggester(cloudManager, jsonObj, null);
     int count = 0;
@@ -3207,271 +2937,12 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   //SOLR-12358
-  public void testSortError() {
+  public void testSortError() throws IOException {
     Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString("{cluster-preferences: [{minimize : cores, precision:1}, " +
         "{maximize : freedisk, precision: 50}, " +
         "{minimize: sysLoadAvg}]}"));
-    String rowsData = "{'sortedNodes':[" +
-        "    {" +
-        "      'node':'solr-01:8983_solr'," +
-        "      'replicas':{}," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':2}," +
-        "        {'freedisk':1734.5261459350586}," +
-        "        {'sysLoadAvg':35.0}," +
-        "        {'node':'solr-01:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-07:8983_solr'," +
-        "      'replicas':{}," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1721.5669250488281}," +
-        "        {'sysLoadAvg':10.0}," +
-        "        {'node':'solr-07:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-08:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1764.9518203735352}," +
-        "        {'sysLoadAvg':330.0}," +
-        "        {'node':'solr-08:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-25:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1779.7792778015137}," +
-        "        {'sysLoadAvg':304.0}," +
-        "        {'node':'solr-25:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-15:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1697.5930519104004}," +
-        "        {'sysLoadAvg':277.0}," +
-        "        {'node':'solr-15:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-13:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':2}," +
-        "        {'freedisk':1755.1909484863281}," +
-        "        {'sysLoadAvg':265.0}," +
-        "        {'node':'solr-13:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-14:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1757.6035423278809}," +
-        "        {'sysLoadAvg':61.0}," +
-        "        {'node':'solr-14:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-16:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1746.081386566162}," +
-        "        {'sysLoadAvg':260.0}," +
-        "        {'node':'solr-16:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-04:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':2}," +
-        "        {'freedisk':1708.7230529785156}," +
-        "        {'sysLoadAvg':216.0}," +
-        "        {'node':'solr-04:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-06:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1688.3182678222656}," +
-        "        {'sysLoadAvg':385.0}," +
-        "        {'node':'solr-06:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-02:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':6}," +
-        "        {'freedisk':1778.226963043213}," +
-        "        {'sysLoadAvg':369.0}," +
-        "        {'node':'solr-02:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-05:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1741.9401931762695}," +
-        "        {'sysLoadAvg':354.0}," +
-        "        {'node':'solr-05:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-23:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1718.854579925537}," +
-        "        {'sysLoadAvg':329.0}," +
-        "        {'node':'solr-23:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-24:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1733.6669311523438}," +
-        "        {'sysLoadAvg':327.0}," +
-        "        {'node':'solr-24:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-09:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1714.6191711425781}," +
-        "        {'sysLoadAvg':278.0}," +
-        "        {'node':'solr-09:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-10:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1755.3038482666016}," +
-        "        {'sysLoadAvg':266.0}," +
-        "        {'node':'solr-10:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-28:8983_solr'," +
-        "      'isLive':false," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1691.3830909729004}," +
-        "        {'sysLoadAvg':261.0}," +
-        "        {'node':'solr-28:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-29:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':2}," +
-        "        {'freedisk':1706.797966003418}," +
-        "        {'sysLoadAvg':252.99999999999997}," +
-        "        {'node':'solr-29:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-32:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1762.432300567627}," +
-        "        {'sysLoadAvg':221.0}," +
-        "        {'node':'solr-32:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-21:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1760.9801979064941}," +
-        "        {'sysLoadAvg':213.0}," +
-        "        {'node':'solr-21:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-22:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1780.5297241210938}," +
-        "        {'sysLoadAvg':209.0}," +
-        "        {'node':'solr-22:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-31:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1700.1481628417969}," +
-        "        {'sysLoadAvg':211.0}," +
-        "        {'node':'solr-31:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-33:8983_solr'," +
-        "      'isLive':false," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1748.1132926940918}," +
-        "        {'sysLoadAvg':199.0}," +
-        "        {'node':'solr-33:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-36:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1776.197639465332}," +
-        "        {'sysLoadAvg':193.0}," +
-        "        {'node':'solr-36:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-35:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1746.7729606628418}," +
-        "        {'sysLoadAvg':191.0}," +
-        "        {'node':'solr-35:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-12:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1713.287540435791}," +
-        "        {'sysLoadAvg':175.0}," +
-        "        {'node':'solr-12:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-11:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1736.784511566162}," +
-        "        {'sysLoadAvg':169.0}," +
-        "        {'node':'solr-11:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-35:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1766.9416885375977}," +
-        "        {'sysLoadAvg':155.0}," +
-        "        {'node':'solr-35:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-17:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1764.3425407409668}," +
-        "        {'sysLoadAvg':139.0}," +
-        "        {'node':'solr-17:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-18:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':2}," +
-        "        {'freedisk':1757.0613975524902}," +
-        "        {'sysLoadAvg':132.0}," +
-        "        {'node':'solr-18:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-20:8983_solr'," +
-        "      'isLive':false," +
-        "      'attributes':[" +
-        "        {'cores':3}," +
-        "        {'freedisk':1747.4205322265625}," +
-        "        {'sysLoadAvg':126.0}," +
-        "        {'node':'solr-20:8983_solr'}]}," +
-        "    {" +
-        "      'node':'solr-27:8983_solr'," +
-        "      'isLive':true," +
-        "      'attributes':[" +
-        "        {'cores':4}," +
-        "        {'freedisk':1721.0442085266113}," +
-        "        {'sysLoadAvg':118.0}," +
-        "        {'node':'solr-27:8983_solr'}]}]}";
-
-    List l = (List) ((Map) Utils.fromJSONString(rowsData)).get("sortedNodes");
+
+    List l = (List) TestPolicy2.loadFromResource("testSortError.json");
     List<Variable.Type> params = new ArrayList<>();
     params.add(CORES);
     params.add(Variable.Type.FREEDISK);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6adeb5bc/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
index 5365e28..b274974 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
@@ -18,6 +18,7 @@
 package org.apache.solr.client.solrj.cloud.autoscaling;
 
 import java.io.IOException;
+import java.io.InputStream;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -527,164 +528,10 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
       }
     };
   }
-  public void testSysPropSuggestions() {
-    String diagnostics = "{" +
-        "  'diagnostics': {" +
-        "    'sortedNodes': [" +
-        "      {" +
-        "        'node': '127.0.0.1:63191_solr'," +
-        "        'isLive': true," +
-        "        'cores': 3.0," +
-        "        'sysprop.zone': 'east'," +
-        "        'freedisk': 1727.1459312438965," +
-        "        'heapUsage': 24.97510064011647," +
-        "        'sysLoadAvg': 272.75390625," +
-        "        'totaldisk': 1037.938980102539," +
-        "        'replicas': {" +
-        "          'zonesTest': {" +
-        "            'shard1': [" +
-        "              {" +
-        "                'core_node5': {" +
-        "                  'core': 'zonesTest_shard1_replica_n2'," +
-        "                  'leader': 'true'," +
-        "                  'base_url': 'https://127.0.0.1:63191/solr'," +
-        "                  'node_name': '127.0.0.1:63191_solr'," +
-        "                  'state': 'active'," +
-        "                  'type': 'NRT'," +
-        "                  'force_set_state': 'false'," +
-        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
-        "                  'shard': 'shard1'," +
-        "                  'collection': 'zonesTest'" +
-        "                }" +
-        "              }," +
-        "              {" +
-        "                'core_node7': {" +
-        "                  'core': 'zonesTest_shard1_replica_n4'," +
-        "                  'base_url': 'https://127.0.0.1:63191/solr'," +
-        "                  'node_name': '127.0.0.1:63191_solr'," +
-        "                  'state': 'active'," +
-        "                  'type': 'NRT'," +
-        "                  'force_set_state': 'false'," +
-        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
-        "                  'shard': 'shard1'," +
-        "                  'collection': 'zonesTest'" +
-        "                }" +
-        "              }," +
-        "              {" +
-        "                'core_node12': {" +
-        "                  'core': 'zonesTest_shard1_replica_n10'," +
-        "                  'base_url': 'https://127.0.0.1:63191/solr'," +
-        "                  'node_name': '127.0.0.1:63191_solr'," +
-        "                  'state': 'active'," +
-        "                  'type': 'NRT'," +
-        "                  'force_set_state': 'false'," +
-        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
-        "                  'shard': 'shard1'," +
-        "                  'collection': 'zonesTest'" +
-        "                }" +
-        "              }" +
-        "            ]" +
-        "          }" +
-        "        }" +
-        "      }," +
-        "      {" +
-        "        'node': '127.0.0.1:63192_solr'," +
-        "        'isLive': true," +
-        "        'cores': 3.0," +
-        "        'sysprop.zone': 'east'," +
-        "        'freedisk': 1727.1459312438965," +
-        "        'heapUsage': 24.98878807983566," +
-        "        'sysLoadAvg': 272.75390625," +
-        "        'totaldisk': 1037.938980102539," +
-        "        'replicas': {" +
-        "          'zonesTest': {" +
-        "            'shard2': [" +
-        "              {" +
-        "                'core_node3': {" +
-        "                  'core': 'zonesTest_shard1_replica_n1'," +
-        "                  'base_url': 'https://127.0.0.1:63192/solr'," +
-        "                  'node_name': '127.0.0.1:63192_solr'," +
-        "                  'state': 'active'," +
-        "                  'type': 'NRT'," +
-        "                  'force_set_state': 'false'," +
-        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
-        "                  'shard': 'shard2'," +
-        "                  'collection': 'zonesTest'" +
-        "                }" +
-        "              }," +
-        "              {" +
-        "                'core_node9': {" +
-        "                  'core': 'zonesTest_shard1_replica_n6'," +
-        "                  'base_url': 'https://127.0.0.1:63192/solr'," +
-        "                  'node_name': '127.0.0.1:63192_solr'," +
-        "                  'state': 'active'," +
-        "                  'type': 'NRT'," +
-        "                  'force_set_state': 'false'," +
-        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
-        "                  'shard': 'shard2'," +
-        "                  'collection': 'zonesTest'" +
-        "                }" +
-        "              }," +
-        "              {" +
-        "                'core_node11': {" +
-        "                  'core': 'zonesTest_shard1_replica_n8'," +
-        "                  'base_url': 'https://127.0.0.1:63192/solr'," +
-        "                  'node_name': '127.0.0.1:63192_solr'," +
-        "                  'state': 'active'," +
-        "                  'type': 'NRT'," +
-        "                  'force_set_state': 'false'," +
-        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
-        "                  'shard': 'shard2'," +
-        "                  'collection': 'zonesTest'" +
-        "                }" +
-        "              }" +
-        "            ]" +
-        "          }" +
-        "        }" +
-        "      }," +
-        "      {" +
-        "        'node': '127.0.0.1:63219_solr'," +
-        "        'isLive': true," +
-        "        'cores': 0.0," +
-        "        'sysprop.zone': 'west'," +
-        "        'freedisk': 1768.6174201965332," +
-        "        'heapUsage': 24.98878807983566," +
-        "        'sysLoadAvg': 272.75390625," +
-        "        'totaldisk': 1037.938980102539," +
-        "        'replicas': {}" +
-        "      }," +
-        "      {" +
-        "        'node': '127.0.0.1:63229_solr'," +
-        "        'isLive': true," +
-        "        'cores': 0.0," +
-        "        'sysprop.zone': 'west'," +
-        "        'freedisk': 1768.6174201965332," +
-        "        'heapUsage': 24.98878807983566," +
-        "        'sysLoadAvg': 272.75390625," +
-        "        'totaldisk': 1037.938980102539," +
-        "        'replicas': {}" +
-        "      }" +
-        "    ]," +
-        "    'liveNodes': [" +
-        "      '127.0.0.1:63191_solr'," +
-        "      '127.0.0.1:63192_solr'," +
-        "      '127.0.0.1:63219_solr'," +
-        "      '127.0.0.1:63229_solr'" +
-        "    ]," +
-        "    'config': {" +
-        "      'cluster-preferences': [" +
-        "        {'minimize': 'cores', 'precision': 1}," +
-        "        {'maximize': 'freedisk', 'precision': 100}," +
-        "        {'minimize': 'sysLoadAvg', 'precision': 10}" +
-        "      ]," +
-        "      'cluster-policy': [" +
-        "        {'replica': '<3', 'shard': '#EACH', 'sysprop.zone': [east, west]}" +
-        "      ]" +
-        "    }" +
-        "  }" +
-        "}";
 
-    Map<String, Object> m = (Map<String, Object>) Utils.fromJSONString(diagnostics);
+  public void testSysPropSuggestions() throws IOException {
+
+    Map<String, Object> m = (Map<String, Object>) loadFromResource("testSysPropSuggestions.json");
 
     Map<String, Object> conf = (Map<String, Object>) Utils.getObjectByPath(m, false, "diagnostics/config");
     Policy policy = new Policy(conf);
@@ -704,5 +551,10 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
     }
   }
 
+  public static Object loadFromResource(String file) throws IOException {
+    try (InputStream is = TestPolicy2.class.getResourceAsStream("/solrj/solr/autoscaling/" + file)) {
+      return Utils.fromJSON(is);
+    }
+  }
 
 }


[07/29] lucene-solr:jira/http2: LUCENE-8511: MultiFields.getIndexedFields optimize to not call getMergedFieldInfos

Posted by da...@apache.org.
LUCENE-8511: MultiFields.getIndexedFields optimize to not call getMergedFieldInfos


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4ccf0fb8
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4ccf0fb8
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4ccf0fb8

Branch: refs/heads/jira/http2
Commit: 4ccf0fb8f6ce269de8b4501fca201f5b4763cfe7
Parents: 60569fb
Author: David Smiley <ds...@apache.org>
Authored: Fri Sep 21 23:47:10 2018 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Fri Sep 21 23:47:10 2018 -0400

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  3 +++
 .../org/apache/lucene/index/MultiFields.java    | 27 ++++++++------------
 2 files changed, 13 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4ccf0fb8/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 70badd8..d305759 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -170,6 +170,9 @@ Optimizations
 * LUCENE-8448: Boolean queries now propagates the mininum score to their sub-scorers.
   (Jim Ferenczi, Adrien Grand)
 
+* LUCENE-8511: MultiFields.getIndexedFields is now optimized; does not call getMergedFieldInfos
+  (David Smiley)
+
 ======================= Lucene 7.6.0 =======================
 
 Build

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4ccf0fb8/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
index 19078a8..32ce2fa 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
@@ -21,12 +21,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
 
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -266,7 +267,8 @@ public final class MultiFields extends Fields {
   public static FieldInfos getMergedFieldInfos(IndexReader reader) {
     final String softDeletesField = reader.leaves().stream()
         .map(l -> l.reader().getFieldInfos().getSoftDeletesField())
-        .filter(Objects::nonNull).findAny().orElse(null);
+        .filter(Objects::nonNull)
+        .findAny().orElse(null);
     final FieldInfos.Builder builder = new FieldInfos.Builder(new FieldInfos.FieldNumbers(softDeletesField));
     for(final LeafReaderContext ctx : reader.leaves()) {
       builder.add(ctx.reader().getFieldInfos());
@@ -274,22 +276,13 @@ public final class MultiFields extends Fields {
     return builder.finish();
   }
 
-  /** Call this to get the (merged) FieldInfos representing the
-   *  set of indexed fields <b>only</b> for a composite reader. 
-   *  <p>
-   *  NOTE: the returned field numbers will likely not
-   *  correspond to the actual field numbers in the underlying
-   *  readers, and codec metadata ({@link FieldInfo#getAttribute(String)}
-   *  will be unavailable.
-   */
+  /** Returns a set of names of fields that have a terms index.  The order is undefined. */
   public static Collection<String> getIndexedFields(IndexReader reader) {
-    final Collection<String> fields = new HashSet<>();
-    for(final FieldInfo fieldInfo : getMergedFieldInfos(reader)) {
-      if (fieldInfo.getIndexOptions() != IndexOptions.NONE) {
-        fields.add(fieldInfo.name);
-      }
-    }
-    return fields;
+    return reader.leaves().stream()
+        .flatMap(l -> StreamSupport.stream(l.reader().getFieldInfos().spliterator(), false)
+        .filter(fi -> fi.getIndexOptions() != IndexOptions.NONE))
+        .map(fi -> fi.name)
+        .collect(Collectors.toSet());
   }
 
   private static class LeafReaderFields extends Fields {


[29/29] lucene-solr:jira/http2: Merge branch 'master' into jira/http2

Posted by da...@apache.org.
Merge branch 'master' into jira/http2


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/be332a26
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/be332a26
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/be332a26

Branch: refs/heads/jira/http2
Commit: be332a26b0061a2f661bb82a12a5ce44cbbcc0be
Parents: 1664918 9481c1f
Author: Cao Manh Dat <da...@apache.org>
Authored: Fri Sep 28 09:18:20 2018 +0700
Committer: Cao Manh Dat <da...@apache.org>
Committed: Fri Sep 28 09:18:20 2018 +0700

----------------------------------------------------------------------
 dev-tools/doap/lucene.rdf                       |   7 +
 dev-tools/doap/solr.rdf                         |   7 +
 dev-tools/scripts/prep-solr-ref-guide-rc.sh     |   4 +-
 dev-tools/scripts/smokeTestRelease.py           |  13 +-
 lucene/CHANGES.txt                              |   8 +
 .../index/TestBackwardsCompatibility.java       |   7 +-
 .../org/apache/lucene/index/index.7.5.0-cfs.zip | Bin 0 -> 15615 bytes
 .../apache/lucene/index/index.7.5.0-nocfs.zip   | Bin 0 -> 15629 bytes
 .../org/apache/lucene/index/sorted.7.5.0.zip    | Bin 0 -> 82239 bytes
 lucene/common-build.xml                         |   1 -
 .../org/apache/lucene/index/IndexWriter.java    |  27 +-
 .../org/apache/lucene/index/MergeState.java     |  44 +-
 .../org/apache/lucene/index/MultiFields.java    |  27 +-
 .../apache/lucene/search/TermInSetQuery.java    |   2 +-
 .../apache/lucene/index/TestIndexSorting.java   |  49 +-
 .../document/BaseLatLonShapeTestCase.java       |   2 +-
 .../document/TestLatLonPolygonShapeQueries.java |   8 +-
 .../spatial3d/geom/GeoComplexPolygon.java       | 115 +--
 .../spatial3d/geom/GeoPolygonFactory.java       |  15 +-
 .../lucene/spatial3d/geom/GeoPolygonTest.java   |  18 +
 solr/CHANGES.txt                                |  27 +-
 solr/bin/solr.cmd                               |   4 +-
 .../ExtractingRequestHandlerTest.java           |   6 +-
 .../org/apache/solr/cloud/ZkShardTerms.java     |   8 +-
 .../cloud/api/collections/AddReplicaCmd.java    | 325 +++++---
 .../solr/cloud/api/collections/Assign.java      | 372 +++++----
 .../api/collections/CreateCollectionCmd.java    |  34 +-
 .../cloud/api/collections/CreateShardCmd.java   | 157 +---
 .../cloud/api/collections/MoveReplicaCmd.java   |   2 +-
 .../OverseerCollectionMessageHandler.java       |   2 +-
 .../cloud/api/collections/ReplaceNodeCmd.java   |  35 +-
 .../solr/cloud/api/collections/RestoreCmd.java  |  16 +-
 .../cloud/api/collections/SplitShardCmd.java    |  17 +-
 .../cloud/autoscaling/ComputePlanAction.java    |   8 +-
 .../cloud/autoscaling/IndexSizeTrigger.java     |  12 +-
 .../solr/cloud/overseer/ReplicaMutator.java     |   4 +-
 .../java/org/apache/solr/core/SolrConfig.java   |   2 +-
 .../solr/handler/admin/CollectionsHandler.java  |   9 +-
 .../solr/handler/admin/MetricsHandler.java      |   4 +-
 .../handler/admin/MetricsHistoryHandler.java    |   4 +-
 .../apache/solr/metrics/SolrMetricManager.java  |  67 +-
 .../metrics/reporters/SolrSlf4jReporter.java    |   4 +-
 .../reporters/solr/SolrClusterReporter.java     |   8 +-
 .../solr/search/ExtendedDismaxQParser.java      |  89 ++-
 .../org/apache/solr/cloud/AddReplicaTest.java   |  92 ++-
 .../org/apache/solr/cloud/CloudTestUtils.java   |   6 +-
 .../apache/solr/cloud/MoveReplicaHDFSTest.java  |   3 +-
 .../apache/solr/cloud/MultiThreadedOCPTest.java |   2 +-
 .../TestLeaderInitiatedRecoveryThread.java      |   1 +
 .../solr/cloud/TestSkipOverseerOperations.java  |   3 +
 .../TestSolrCloudWithDelegationTokens.java      |   2 +-
 .../apache/solr/cloud/TestWithCollection.java   |   2 +
 .../solr/cloud/UnloadDistributedZkTest.java     |   1 +
 .../org/apache/solr/cloud/ZkShardTermsTest.java |  14 +-
 .../CollectionTooManyReplicasTest.java          |  10 +-
 .../CollectionsAPIAsyncDistributedZkTest.java   |   1 +
 .../cloud/api/collections/ShardSplitTest.java   |   2 +-
 .../cloud/autoscaling/IndexSizeTriggerTest.java |  10 +-
 .../cloud/autoscaling/ScheduledTriggerTest.java |   1 +
 .../solr/cloud/autoscaling/TestPolicyCloud.java |   1 +
 .../cloud/autoscaling/sim/SimCloudManager.java  |  66 +-
 .../sim/SimClusterStateProvider.java            | 797 ++++++++++++++-----
 .../autoscaling/sim/SimDistribStateManager.java |   7 +
 .../autoscaling/sim/SimNodeStateProvider.java   |  21 +-
 .../sim/TestSimExecutePlanAction.java           |   4 +-
 .../autoscaling/sim/TestSimExtremeIndexing.java | 163 ++++
 .../autoscaling/sim/TestSimNodeLostTrigger.java |   2 +-
 .../autoscaling/sim/TestSimPolicyCloud.java     |  13 +-
 .../sim/TestSimTriggerIntegration.java          |   8 +-
 .../solr/handler/TestReplicationHandler.java    |   2 +-
 .../DistributedDebugComponentTest.java          |   4 +-
 .../solr/metrics/SolrMetricManagerTest.java     |  14 -
 .../solr/search/TestExtendedDismaxParser.java   |  33 +-
 solr/solr-ref-guide/src/collections-api.adoc    |  68 +-
 solr/solr-ref-guide/src/meta-docs/publish.adoc  |   7 +-
 .../cloud/autoscaling/FreeDiskVariable.java     |  12 +-
 .../client/solrj/cloud/autoscaling/Policy.java  |  35 +-
 .../solrj/cloud/autoscaling/PolicyHelper.java   |  91 ++-
 .../solrj/cloud/autoscaling/ReplicaInfo.java    |   4 +-
 .../client/solrj/cloud/autoscaling/Row.java     |   2 +-
 .../solrj/cloud/autoscaling/Suggester.java      |   2 +-
 .../solrj/cloud/autoscaling/Suggestion.java     |   5 +
 .../solrj/request/CollectionAdminRequest.java   |  53 ++
 .../solr/common/ConditionalMapWriter.java       |   2 +-
 .../java/org/apache/solr/common/MapWriter.java  |  10 +
 .../solr/common/cloud/ReplicaPosition.java      |   2 +-
 .../java/org/apache/solr/common/util/Utils.java |  35 +-
 .../collections.collection.Commands.json        |   3 +-
 .../testAutoScalingHandlerFailure.json          | 141 ++++
 .../testScheduledTriggerFailure.json            |  52 ++
 .../solrj/solr/autoscaling/testSortError.json   | 225 ++++++
 .../autoscaling/testSuggestionsRebalance2.json  | 130 +++
 .../testSuggestionsRebalanceOnly.json           | 105 +++
 .../autoscaling/testSysPropSuggestions.json     | 119 +++
 .../autoscaling/testUtilizeNodeFailure.json     |  69 ++
 .../autoscaling/testUtilizeNodeFailure2.json    |  66 ++
 .../solr/client/solrj/SolrExceptionTest.java    |   3 +
 .../solrj/beans/TestDocumentObjectBinder.java   |   4 +
 .../solrj/cloud/autoscaling/TestPolicy.java     | 641 ++-------------
 .../solrj/cloud/autoscaling/TestPolicy2.java    | 387 ++-------
 .../solrj/impl/CloudSolrClientBuilderTest.java  |   6 +
 .../CloudSolrClientMultiConstructorTest.java    |   3 +
 .../ConcurrentUpdateSolrClientBuilderTest.java  |   1 +
 .../client/solrj/impl/HttpClientUtilTest.java   |   6 +-
 .../client/solrj/impl/LBHttpSolrClientTest.java |   2 +
 .../solrj/io/stream/MathExpressionTest.java     |   1 +
 .../stream/StreamExpressionToExpessionTest.java |   2 +
 .../StreamExpressionToExplanationTest.java      |   2 +
 .../request/TestCollectionAdminRequest.java     |   5 +
 .../solrj/request/TestUpdateRequestCodec.java   |   5 +-
 .../solrj/request/TestV1toV2ApiMapper.java      |  11 +-
 .../solrj/response/QueryResponseTest.java       |   6 +-
 .../response/TestDelegationTokenResponse.java   |   2 +
 .../solr/common/TestToleratedUpdateError.java   |   5 +-
 .../solr/common/params/ShardParamsTest.java     |   5 +-
 .../apache/solr/common/util/NamedListTest.java  |   7 +-
 .../solr/common/util/TestFastInputStream.java   |   1 +
 .../solr/common/util/TestNamedListCodec.java    |  11 +-
 118 files changed, 3238 insertions(+), 1971 deletions(-)
----------------------------------------------------------------------



[12/29] lucene-solr:jira/http2: Add 7.5.0 back compat test indexes

Posted by da...@apache.org.
Add 7.5.0 back compat test indexes


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/baf40d5f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/baf40d5f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/baf40d5f

Branch: refs/heads/jira/http2
Commit: baf40d5f0553eebfdaad030c42fc4283fe585d69
Parents: cecf31e
Author: Jim Ferenczi <ji...@apache.org>
Authored: Mon Sep 24 10:10:36 2018 +0200
Committer: Jim Ferenczi <ji...@apache.org>
Committed: Mon Sep 24 10:10:36 2018 +0200

----------------------------------------------------------------------
 .../lucene/index/TestBackwardsCompatibility.java   |   7 +++++--
 .../org/apache/lucene/index/index.7.5.0-cfs.zip    | Bin 0 -> 15615 bytes
 .../org/apache/lucene/index/index.7.5.0-nocfs.zip  | Bin 0 -> 15629 bytes
 .../test/org/apache/lucene/index/sorted.7.5.0.zip  | Bin 0 -> 82239 bytes
 4 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/baf40d5f/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index ce22bb5..b1e7466 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -302,7 +302,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     "7.3.1-cfs",
     "7.3.1-nocfs",
     "7.4.0-cfs",
-    "7.4.0-nocfs"
+    "7.4.0-nocfs",
+    "7.5.0-cfs",
+    "7.5.0-nocfs"
   };
 
   public static String[] getOldNames() {
@@ -317,7 +319,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     "sorted.7.2.1",
     "sorted.7.3.0",
     "sorted.7.3.1",
-    "sorted.7.4.0"
+    "sorted.7.4.0",
+    "sorted.7.5.0"
   };
 
   public static String[] getOldSortedNames() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/baf40d5f/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.5.0-cfs.zip
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.5.0-cfs.zip b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.5.0-cfs.zip
new file mode 100644
index 0000000..0e24be4
Binary files /dev/null and b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.5.0-cfs.zip differ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/baf40d5f/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.5.0-nocfs.zip
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.5.0-nocfs.zip b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.5.0-nocfs.zip
new file mode 100644
index 0000000..f975332
Binary files /dev/null and b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.5.0-nocfs.zip differ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/baf40d5f/lucene/backward-codecs/src/test/org/apache/lucene/index/sorted.7.5.0.zip
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/sorted.7.5.0.zip b/lucene/backward-codecs/src/test/org/apache/lucene/index/sorted.7.5.0.zip
new file mode 100644
index 0000000..2d91e9a
Binary files /dev/null and b/lucene/backward-codecs/src/test/org/apache/lucene/index/sorted.7.5.0.zip differ


[17/29] lucene-solr:jira/http2: SOLR-12028: Reduce test data set for TestLatLonPolygonShapeQueries.testRandomBig

Posted by da...@apache.org.
SOLR-12028: Reduce test data set for TestLatLonPolygonShapeQueries.testRandomBig


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2b4717c6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2b4717c6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2b4717c6

Branch: refs/heads/jira/http2
Commit: 2b4717c6f4e7d29dfa56ba2d6e5c64d9cc3fbbdc
Parents: 2bad3c4
Author: Nicholas Knize <nk...@gmail.com>
Authored: Tue Sep 25 09:26:20 2018 -0500
Committer: Nicholas Knize <nk...@gmail.com>
Committed: Tue Sep 25 09:26:20 2018 -0500

----------------------------------------------------------------------
 .../org/apache/lucene/document/BaseLatLonShapeTestCase.java  | 2 +-
 .../lucene/document/TestLatLonPolygonShapeQueries.java       | 8 ++++++--
 2 files changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2b4717c6/lucene/sandbox/src/test/org/apache/lucene/document/BaseLatLonShapeTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/test/org/apache/lucene/document/BaseLatLonShapeTestCase.java b/lucene/sandbox/src/test/org/apache/lucene/document/BaseLatLonShapeTestCase.java
index 191e2cb..7af5177 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/document/BaseLatLonShapeTestCase.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/document/BaseLatLonShapeTestCase.java
@@ -149,7 +149,7 @@ public abstract class BaseLatLonShapeTestCase extends LuceneTestCase {
     doTestRandom(50000);
   }
 
-  private void doTestRandom(int count) throws Exception {
+  protected void doTestRandom(int count) throws Exception {
     int numShapes = atLeast(count);
     ShapeType type = getShapeType();
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2b4717c6/lucene/sandbox/src/test/org/apache/lucene/document/TestLatLonPolygonShapeQueries.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/test/org/apache/lucene/document/TestLatLonPolygonShapeQueries.java b/lucene/sandbox/src/test/org/apache/lucene/document/TestLatLonPolygonShapeQueries.java
index ce76a82..03837a0 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/document/TestLatLonPolygonShapeQueries.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/document/TestLatLonPolygonShapeQueries.java
@@ -23,10 +23,8 @@ import org.apache.lucene.geo.Polygon;
 import org.apache.lucene.geo.Polygon2D;
 import org.apache.lucene.geo.Tessellator;
 import org.apache.lucene.index.PointValues.Relation;
-import org.apache.lucene.util.LuceneTestCase;
 
 /** random bounding box and polygon query tests for random indexed {@link Polygon} types */
-@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 6-Sep-2018
 public class TestLatLonPolygonShapeQueries extends BaseLatLonShapeTestCase {
 
   protected final PolygonValidator VALIDATOR = new PolygonValidator();
@@ -99,4 +97,10 @@ public class TestLatLonPolygonShapeQueries extends BaseLatLonShapeTestCase {
       return queryRelation == QueryRelation.INTERSECTS ? false : true;
     }
   }
+
+  @Nightly
+  @Override
+  public void testRandomBig() throws Exception {
+    doTestRandom(25000);
+  }
 }


[05/29] lucene-solr:jira/http2: SOLR-9317: ADDREPLICA command should be able to add more than one replica to a collection, shard at a time.

Posted by da...@apache.org.
SOLR-9317: ADDREPLICA command should be able to add more than one replica to a collection, shard at a time.

The API now supports 'nrtReplicas', 'tlogReplicas', 'pullReplicas' parameters as well as the 'createNodeSet' parameter. As part of this change, the CREATESHARD API now delegates placing replicas entirely to the ADDREPLICA command and uses the new parameters to add all the replicas in one API call.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4bcace57
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4bcace57
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4bcace57

Branch: refs/heads/jira/http2
Commit: 4bcace571ee1e512b2ca4aa3d93bc7bd522b55fe
Parents: af2de93
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Fri Sep 21 15:12:21 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Fri Sep 21 15:12:21 2018 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   5 +
 .../cloud/api/collections/AddReplicaCmd.java    | 334 +++++++++++++------
 .../solr/cloud/api/collections/Assign.java      |  22 +-
 .../cloud/api/collections/CreateShardCmd.java   | 157 +++------
 .../cloud/api/collections/MoveReplicaCmd.java   |   2 +-
 .../OverseerCollectionMessageHandler.java       |   2 +-
 .../cloud/api/collections/ReplaceNodeCmd.java   |   2 +-
 .../solr/handler/admin/CollectionsHandler.java  |   9 +-
 .../org/apache/solr/cloud/AddReplicaTest.java   |  90 ++++-
 .../CollectionTooManyReplicasTest.java          |   2 +-
 .../cloud/api/collections/ShardSplitTest.java   |   2 +-
 .../sim/SimClusterStateProvider.java            |  93 ++++--
 .../autoscaling/sim/TestSimPolicyCloud.java     |   2 +-
 solr/solr-ref-guide/src/collections-api.adoc    |  68 +++-
 .../solrj/request/CollectionAdminRequest.java   |  53 +++
 15 files changed, 557 insertions(+), 286 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 249f681..ee1d7b7 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -90,6 +90,11 @@ New Features
   error.  Previously, the collapsing behavior was unreliable and undefined despite no explicit error.
   (Munendra S N, David Smiley)
 
+* SOLR-9317: ADDREPLICA command should be able to add more than one replica to a collection,shard at a time.
+  The API now supports 'nrtReplicas', 'tlogReplicas', 'pullReplicas' parameters as well 'createNodeSet' parameter.
+  As part of this change, the CREATESHARD API now delegates placing replicas entirely to the ADDREPLICA command
+  and uses the new parameters to add all the replicas in one API call. (shalin)
+
 Other Changes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
index c9dbaec..f128c2e 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
@@ -20,14 +20,17 @@ package org.apache.solr.cloud.api.collections;
 
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
@@ -41,6 +44,7 @@ import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
@@ -54,10 +58,14 @@ import org.apache.solr.handler.component.ShardHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
 import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
 import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
 import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
 import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
 import static org.apache.solr.common.params.CollectionAdminParams.WITH_COLLECTION;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
@@ -79,28 +87,116 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
     addReplica(state, message, results, null);
   }
 
-  ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
+  List<ZkNodeProps> addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
       throws IOException, InterruptedException {
     log.debug("addReplica() : {}", Utils.toJSONString(message));
 
     String collectionName = message.getStr(COLLECTION_PROP);
+    String shard = message.getStr(SHARD_ID_PROP);
+
     DocCollection coll = clusterState.getCollection(collectionName);
+    if (coll == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collectionName + " does not exist");
+    }
+    if (coll.getSlice(shard) == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+          "Collection: " + collectionName + " shard: " + shard + " does not exist");
+    }
 
     boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
     boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
     final String asyncId = message.getStr(ASYNC);
 
-    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-    message = assignReplicaDetails(ocmh.cloudManager, clusterState, message, sessionWrapper);
-
     String node = message.getStr(CoreAdminParams.NODE);
-    String shard = message.getStr(SHARD_ID_PROP);
-    String coreName = message.getStr(CoreAdminParams.NAME);
-    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
+    String createNodeSetStr = message.getStr(CREATE_NODE_SET);
+
+    if (node != null && createNodeSetStr != null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Both 'node' and 'createNodeSet' parameters cannot be specified together.");
+    }
+
     int timeout = message.getInt(TIMEOUT, 10 * 60); // 10 minutes
-    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
     boolean parallel = message.getBool("parallel", false);
 
+    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
+    EnumMap<Replica.Type, Integer> replicaTypesVsCount = new EnumMap<>(Replica.Type.class);
+    replicaTypesVsCount.put(Replica.Type.NRT, message.getInt(NRT_REPLICAS, replicaType == Replica.Type.NRT ? 1 : 0));
+    replicaTypesVsCount.put(Replica.Type.TLOG, message.getInt(TLOG_REPLICAS, replicaType == Replica.Type.TLOG ? 1 : 0));
+    replicaTypesVsCount.put(Replica.Type.PULL, message.getInt(PULL_REPLICAS, replicaType == Replica.Type.PULL ? 1 : 0));
+
+    int totalReplicas = 0;
+    for (Map.Entry<Replica.Type, Integer> entry : replicaTypesVsCount.entrySet()) {
+      totalReplicas += entry.getValue();
+    }
+    if (totalReplicas > 1)  {
+      if (message.getStr(CoreAdminParams.NAME) != null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create " + totalReplicas + " replicas if 'name' parameter is specified");
+      }
+      if (message.getStr(CoreAdminParams.CORE_NODE_NAME) != null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create " + totalReplicas + " replicas if 'coreNodeName' parameter is specified");
+      }
+    }
+
+    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
+    List<CreateReplica> createReplicas = buildReplicaPositions(ocmh.cloudManager, clusterState, collectionName, message, replicaTypesVsCount, sessionWrapper)
+        .stream()
+        .map(replicaPosition -> assignReplicaDetails(ocmh.cloudManager, clusterState, message, replicaPosition))
+        .collect(Collectors.toList());
+
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    // For tracking async calls.
+    Map<String,String> requestMap = new HashMap<>();
+
+    for (CreateReplica createReplica : createReplicas) {
+      assert createReplica.coreName != null;
+      ModifiableSolrParams params = getReplicaParams(clusterState, message, results, collectionName, coll, skipCreateReplicaInClusterState, asyncId, shardHandler, createReplica);
+      ocmh.sendShardRequest(createReplica.node, params, shardHandler, asyncId, requestMap);
+    }
+
+    Runnable runnable = () -> {
+      ocmh.processResponses(results, shardHandler, true, "ADDREPLICA failed to create replica", asyncId, requestMap);
+      for (CreateReplica replica : createReplicas) {
+        ocmh.waitForCoreNodeName(collectionName, replica.node, replica.coreName);
+      }
+
+      if (sessionWrapper.get() != null) {
+        sessionWrapper.get().release();
+      }
+      if (onComplete != null) onComplete.run();
+    };
+
+    if (!parallel || waitForFinalState) {
+      if (waitForFinalState) {
+        SolrCloseableLatch latch = new SolrCloseableLatch(totalReplicas, ocmh);
+        ActiveReplicaWatcher watcher = new ActiveReplicaWatcher(collectionName, null,
+            createReplicas.stream().map(createReplica -> createReplica.coreName).collect(Collectors.toList()), latch);
+        try {
+          zkStateReader.registerCollectionStateWatcher(collectionName, watcher);
+          runnable.run();
+          if (!latch.await(timeout, TimeUnit.SECONDS)) {
+            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Timeout waiting " + timeout + " seconds for replica to become active.");
+          }
+        } finally {
+          zkStateReader.removeCollectionStateWatcher(collectionName, watcher);
+        }
+      } else {
+        runnable.run();
+      }
+    } else {
+      ocmh.tpe.submit(runnable);
+    }
+
+    return createReplicas.stream()
+        .map(createReplica -> new ZkNodeProps(
+            ZkStateReader.COLLECTION_PROP, createReplica.collectionName,
+            ZkStateReader.SHARD_ID_PROP, createReplica.sliceName,
+            ZkStateReader.CORE_NAME_PROP, createReplica.coreName,
+            ZkStateReader.NODE_NAME_PROP, createReplica.node
+        ))
+        .collect(Collectors.toList());
+  }
+
+  private ModifiableSolrParams getReplicaParams(ClusterState clusterState, ZkNodeProps message, NamedList results, String collectionName, DocCollection coll, boolean skipCreateReplicaInClusterState, String asyncId, ShardHandler shardHandler, CreateReplica createReplica) throws IOException, InterruptedException {
     if (coll.getStr(WITH_COLLECTION) != null) {
       String withCollectionName = coll.getStr(WITH_COLLECTION);
       DocCollection withCollection = clusterState.getCollection(withCollectionName);
@@ -109,14 +205,14 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
       }
       String withCollectionShard = withCollection.getActiveSlices().iterator().next().getName();
 
-      List<Replica> replicas = withCollection.getReplicas(node);
+      List<Replica> replicas = withCollection.getReplicas(createReplica.node);
       if (replicas == null || replicas.isEmpty()) {
         // create a replica of withCollection on the identified node before proceeding further
         ZkNodeProps props = new ZkNodeProps(
             Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
             ZkStateReader.COLLECTION_PROP, withCollectionName,
             ZkStateReader.SHARD_ID_PROP, withCollectionShard,
-            "node", node,
+            "node", createReplica.node,
             CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.TRUE.toString()); // set to true because we want `withCollection` to be ready after this collection is created
         addReplica(clusterState, props, results, null);
       }
@@ -130,14 +226,14 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
         ZkNodeProps props = new ZkNodeProps(
             Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
             ZkStateReader.COLLECTION_PROP, collectionName,
-            ZkStateReader.SHARD_ID_PROP, shard,
-            ZkStateReader.CORE_NAME_PROP, coreName,
+            ZkStateReader.SHARD_ID_PROP, createReplica.sliceName,
+            ZkStateReader.CORE_NAME_PROP, createReplica.coreName,
             ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(node),
-            ZkStateReader.NODE_NAME_PROP, node,
-            ZkStateReader.REPLICA_TYPE, replicaType.name());
-        if (coreNodeName != null) {
-          props = props.plus(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
+            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(createReplica.node),
+            ZkStateReader.NODE_NAME_PROP, createReplica.node,
+            ZkStateReader.REPLICA_TYPE, createReplica.replicaType.name());
+        if (createReplica.coreNodeName != null) {
+          props = props.plus(ZkStateReader.CORE_NODE_NAME_PROP, createReplica.coreNodeName);
         }
         try {
           Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
@@ -146,7 +242,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
         }
       }
       params.set(CoreAdminParams.CORE_NODE_NAME,
-          ocmh.waitToSeeReplicasInState(collectionName, Collections.singletonList(coreName)).get(coreName).getName());
+          ocmh.waitToSeeReplicasInState(collectionName, Collections.singletonList(createReplica.coreName)).get(createReplica.coreName).getName());
     }
 
     String configName = zkStateReader.readConfigName(collectionName);
@@ -156,12 +252,12 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
     String instanceDir = message.getStr(CoreAdminParams.INSTANCE_DIR);
 
     params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
-    params.set(CoreAdminParams.NAME, coreName);
+    params.set(CoreAdminParams.NAME, createReplica.coreName);
     params.set(COLL_CONF, configName);
     params.set(CoreAdminParams.COLLECTION, collectionName);
-    params.set(CoreAdminParams.REPLICA_TYPE, replicaType.name());
-    if (shard != null) {
-      params.set(CoreAdminParams.SHARD, shard);
+    params.set(CoreAdminParams.REPLICA_TYPE, createReplica.replicaType.name());
+    if (createReplica.sliceName != null) {
+      params.set(CoreAdminParams.SHARD, createReplica.sliceName);
     } else if (routeKey != null) {
       Collection<Slice> slices = coll.getRouter().getSearchSlicesSingle(routeKey, null, coll);
       if (slices.isEmpty()) {
@@ -181,108 +277,34 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
     if (instanceDir != null) {
       params.set(CoreAdminParams.INSTANCE_DIR, instanceDir);
     }
-    if (coreNodeName != null) {
-      params.set(CoreAdminParams.CORE_NODE_NAME, coreNodeName);
+    if (createReplica.coreNodeName != null) {
+      params.set(CoreAdminParams.CORE_NODE_NAME, createReplica.coreNodeName);
     }
     ocmh.addPropertyParams(message, params);
 
-    // For tracking async calls.
-    Map<String,String> requestMap = new HashMap<>();
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-
-    ocmh.sendShardRequest(node, params, shardHandler, asyncId, requestMap);
-
-    final String fnode = node;
-    final String fcoreName = coreName;
-
-    Runnable runnable = () -> {
-      ocmh.processResponses(results, shardHandler, true, "ADDREPLICA failed to create replica", asyncId, requestMap);
-      ocmh.waitForCoreNodeName(collectionName, fnode, fcoreName);
-      if (sessionWrapper.get() != null) {
-        sessionWrapper.get().release();
-      }
-      if (onComplete != null) onComplete.run();
-    };
-
-    if (!parallel || waitForFinalState) {
-      if (waitForFinalState) {
-        SolrCloseableLatch latch = new SolrCloseableLatch(1, ocmh);
-        ActiveReplicaWatcher watcher = new ActiveReplicaWatcher(collectionName, null, Collections.singletonList(coreName), latch);
-        try {
-          zkStateReader.registerCollectionStateWatcher(collectionName, watcher);
-          runnable.run();
-          if (!latch.await(timeout, TimeUnit.SECONDS)) {
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Timeout waiting " + timeout + " seconds for replica to become active.");
-          }
-        } finally {
-          zkStateReader.removeCollectionStateWatcher(collectionName, watcher);
-        }
-      } else {
-        runnable.run();
-      }
-    } else {
-      ocmh.tpe.submit(runnable);
-    }
-
-
-    return new ZkNodeProps(
-        ZkStateReader.COLLECTION_PROP, collectionName,
-        ZkStateReader.SHARD_ID_PROP, shard,
-        ZkStateReader.CORE_NAME_PROP, coreName,
-        ZkStateReader.NODE_NAME_PROP, node
-    );
+    return params;
   }
 
-  public static ZkNodeProps assignReplicaDetails(SolrCloudManager cloudManager, ClusterState clusterState,
-                                                 ZkNodeProps message, AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
+  public static CreateReplica assignReplicaDetails(SolrCloudManager cloudManager, ClusterState clusterState,
+                                                 ZkNodeProps message, ReplicaPosition replicaPosition) {
     boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
 
     String collection = message.getStr(COLLECTION_PROP);
-    String node = message.getStr(CoreAdminParams.NODE);
+    String node = replicaPosition.node;
     String shard = message.getStr(SHARD_ID_PROP);
     String coreName = message.getStr(CoreAdminParams.NAME);
     String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
-    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
+    Replica.Type replicaType = replicaPosition.type;
+
     if (StringUtils.isBlank(coreName)) {
       coreName = message.getStr(CoreAdminParams.PROPERTY_PREFIX + CoreAdminParams.NAME);
     }
 
-    DocCollection coll = clusterState.getCollection(collection);
-    if (coll == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
-    }
-    if (coll.getSlice(shard) == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Collection: " + collection + " shard: " + shard + " does not exist");
-    }
-
-    // Kind of unnecessary, but it does put the logic of whether to override maxShardsPerNode in one place.
-    if (!skipCreateReplicaInClusterState) {
-      if (CloudUtil.usePolicyFramework(coll, cloudManager)) {
-        if (node == null) {
-          if(coll.getPolicyName() != null) message.getProperties().put(Policy.POLICY, coll.getPolicyName());
-          node = Assign.identifyNodes(cloudManager,
-              clusterState,
-              Collections.emptyList(),
-              collection,
-              message,
-              Collections.singletonList(shard),
-              replicaType == Replica.Type.NRT ? 1 : 0,
-              replicaType == Replica.Type.TLOG ? 1 : 0,
-              replicaType == Replica.Type.PULL ? 1 : 0
-          ).get(0).node;
-          sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
-        }
-      } else {
-        node = Assign.getNodesForNewReplicas(clusterState, collection, shard, 1, node,
-            cloudManager).get(0).nodeName;// TODO: use replica type in this logic too
-      }
-    }
     log.info("Node Identified {} for creating new replica of shard {}", node, shard);
-
     if (!clusterState.liveNodesContain(node)) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
     }
+    DocCollection coll = clusterState.getCollection(collection);
     if (coreName == null) {
       coreName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), coll, shard, replicaType);
     } else if (!skipCreateReplicaInClusterState) {
@@ -297,11 +319,103 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
         }
       }
     }
-    if (coreNodeName != null) {
-      message = message.plus(CoreAdminParams.CORE_NODE_NAME, coreNodeName);
+    return new CreateReplica(collection, shard, node, replicaType, coreName, coreNodeName);
+  }
+
+  public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
+                                                            String collectionName, ZkNodeProps message,
+                                                            EnumMap<Replica.Type, Integer> replicaTypeVsCount,
+                                                            AtomicReference< PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
+    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
+    String sliceName = message.getStr(SHARD_ID_PROP);
+    DocCollection collection = clusterState.getCollection(collectionName);
+
+    int numNrtReplicas = replicaTypeVsCount.get(Replica.Type.NRT);
+    int numPullReplicas = replicaTypeVsCount.get(Replica.Type.PULL);
+    int numTlogReplicas = replicaTypeVsCount.get(Replica.Type.TLOG);
+    int totalReplicas = numNrtReplicas + numPullReplicas + numTlogReplicas;
+
+    String node = message.getStr(CoreAdminParams.NODE);
+    Object createNodeSetStr = message.get(OverseerCollectionMessageHandler.CREATE_NODE_SET);
+    if (createNodeSetStr == null) {
+      if (node != null) {
+        message.getProperties().put(OverseerCollectionMessageHandler.CREATE_NODE_SET, node);
+        createNodeSetStr = node;
+      }
     }
-    message = message.plus(CoreAdminParams.NAME, coreName);
-    message = message.plus(CoreAdminParams.NODE, node);
-    return message;
+
+    List<ReplicaPosition> positions = null;
+    if (!skipCreateReplicaInClusterState) {
+      if (CloudUtil.usePolicyFramework(collection, cloudManager)) {
+        if (node == null) {
+          if (collection.getPolicyName() != null) message.getProperties().put(Policy.POLICY, collection.getPolicyName());
+          positions = Assign.identifyNodes(cloudManager,
+              clusterState,
+              Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM),
+              collection.getName(),
+              message,
+              Collections.singletonList(sliceName),
+              numNrtReplicas,
+              numTlogReplicas,
+              numPullReplicas);
+          sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
+        }
+      } else {
+        List<Assign.ReplicaCount> sortedNodeList = Assign.getNodesForNewReplicas(clusterState, collection.getName(), sliceName, numNrtReplicas,
+            numTlogReplicas, numPullReplicas, createNodeSetStr, cloudManager);
+        int i = 0;
+        positions = new ArrayList<>();
+        for (Map.Entry<Replica.Type, Integer> e : replicaTypeVsCount.entrySet()) {
+          for (int j = 0; j < e.getValue(); j++) {
+            positions.add(new ReplicaPosition(sliceName, j + 1, e.getKey(), sortedNodeList.get(i % sortedNodeList.size()).nodeName));
+            i++;
+          }
+        }
+      }
+    }
+
+    if (positions == null)  {
+      assert node != null;
+      if (node == null) {
+        // in case asserts are disabled
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+            "A node should have been identified to add replica but wasn't. Please inform solr developers at SOLR-9317");
+      }
+      // it is unlikely that multiple replicas have been requested to be created on
+      // the same node, but we've got to accommodate.
+      positions = new ArrayList<>(totalReplicas);
+      int i = 0;
+      for (Map.Entry<Replica.Type, Integer> entry : replicaTypeVsCount.entrySet()) {
+        for (int j = 0; j < entry.getValue(); j++) {
+          positions.add(new ReplicaPosition(sliceName, i++, entry.getKey(), node));
+        }
+      }
+    }
+    return positions;
   }
+
+  /**
+   * A data structure to keep all information required to create a new replica in one place.
+   * Think of it as a typed ZkNodeProps for replica creation.
+   *
+   * This is <b>not</b> a public API and can be changed at any time without notice.
+   */
+  public static class CreateReplica {
+    public final String collectionName;
+    public final String sliceName;
+    public final String node;
+    public final Replica.Type replicaType;
+    public String coreName;
+    public String coreNodeName;
+
+    CreateReplica(String collectionName, String sliceName, String node, Replica.Type replicaType, String coreName, String coreNodeName) {
+      this.collectionName = collectionName;
+      this.sliceName = sliceName;
+      this.node = node;
+      this.replicaType = replicaType;
+      this.coreName = coreName;
+      this.coreNodeName = coreNodeName;
+    }
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
index d323510..42de84a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
@@ -273,7 +273,7 @@ public class Assign {
     } else {
       if (numTlogReplicas + numPullReplicas != 0 && rulesMap != null) {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            Replica.Type.TLOG + " or " + Replica.Type.PULL + " replica types not supported with placement rules or cluster policies");
+            Replica.Type.TLOG + " or " + Replica.Type.PULL + " replica types not supported with placement rules");
       }
     }
 
@@ -322,11 +322,11 @@ public class Assign {
   // Gets a list of candidate nodes to put the required replica(s) on. Throws errors if not enough replicas
   // could be created on live nodes given maxShardsPerNode, Replication factor (if from createShard) etc.
   public static List<ReplicaCount> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
-                                                          String shard, int nrtReplicas,
+                                                          String shard, int nrtReplicas, int tlogReplicas, int pullReplicas,
                                                           Object createNodeSet, SolrCloudManager cloudManager) throws IOException, InterruptedException, AssignmentException {
-    log.debug("getNodesForNewReplicas() shard: {} , replicas : {} , createNodeSet {}", shard, nrtReplicas, createNodeSet );
+    log.debug("getNodesForNewReplicas() shard: {} , nrtReplicas : {} , tlogReplicas: {} , pullReplicas: {} , createNodeSet {}", shard, nrtReplicas, tlogReplicas, pullReplicas, createNodeSet );
     DocCollection coll = clusterState.getCollection(collectionName);
-    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
+    Integer maxShardsPerNode = coll.getMaxShardsPerNode() == -1 ? Integer.MAX_VALUE : coll.getMaxShardsPerNode();
     List<String> createNodeList = null;
 
     if (createNodeSet instanceof List) {
@@ -338,15 +338,15 @@ public class Assign {
      HashMap<String, ReplicaCount> nodeNameVsShardCount = getNodeNameVsShardCount(collectionName, clusterState, createNodeList);
 
     if (createNodeList == null) { // We only care if we haven't been told to put new replicas on specific nodes.
-      int availableSlots = 0;
+      long availableSlots = 0;
       for (Map.Entry<String, ReplicaCount> ent : nodeNameVsShardCount.entrySet()) {
         //ADDREPLICA can put more than maxShardsPerNode on an instance, so this test is necessary.
         if (maxShardsPerNode > ent.getValue().thisCollectionNodes) {
           availableSlots += (maxShardsPerNode - ent.getValue().thisCollectionNodes);
         }
       }
-      if (availableSlots < nrtReplicas) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+      if (availableSlots < nrtReplicas + tlogReplicas + pullReplicas) {
+        throw new AssignmentException(
             String.format(Locale.ROOT, "Cannot create %d new replicas for collection %s given the current number of live nodes and a maxShardsPerNode of %d",
                 nrtReplicas, collectionName, maxShardsPerNode));
       }
@@ -355,13 +355,17 @@ public class Assign {
     List l = (List) coll.get(DocCollection.RULE);
     List<ReplicaPosition> replicaPositions = null;
     if (l != null) {
+      if (tlogReplicas + pullReplicas > 0)  {
+        throw new AssignmentException(Replica.Type.TLOG + " or " + Replica.Type.PULL +
+            " replica types not supported with placement rules");
+      }
       // TODO: make it so that this method doesn't require access to CC
       replicaPositions = getNodesViaRules(clusterState, shard, nrtReplicas, cloudManager, coll, createNodeList, l);
     }
     String policyName = coll.getStr(POLICY);
     AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
     if (policyName != null || !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
-      replicaPositions = Assign.getPositionsUsingPolicy(collectionName, Collections.singletonList(shard), nrtReplicas, 0, 0,
+      replicaPositions = Assign.getPositionsUsingPolicy(collectionName, Collections.singletonList(shard), nrtReplicas, tlogReplicas, pullReplicas,
           policyName, cloudManager, createNodeList);
     }
 
@@ -461,7 +465,7 @@ public class Assign {
       return nodeNameVsShardCount;
     }
     DocCollection coll = clusterState.getCollection(collectionName);
-    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
+    int maxShardsPerNode = coll.getMaxShardsPerNode() == -1 ? Integer.MAX_VALUE : coll.getMaxShardsPerNode();
     Map<String, DocCollection> collections = clusterState.getCollectionsMap();
     for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
       DocCollection c = entry.getValue();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
index 802583c..e7f35f1 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
@@ -17,32 +17,17 @@
 package org.apache.solr.cloud.api.collections;
 
 
-import java.io.IOException;
 import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
 
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.cloud.CloudUtil;
 import org.apache.solr.cloud.Overseer;
-import org.apache.solr.common.SolrCloseableLatch;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.common.util.Utils;
@@ -77,114 +62,60 @@ public class CreateShardCmd implements OverseerCollectionMessageHandler.Cmd {
 
     DocCollection collection = clusterState.getCollection(collectionName);
 
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-    SolrCloseableLatch countDownLatch;
-    try {
-      List<ReplicaPosition> positions = buildReplicaPositions(ocmh.cloudManager, clusterState, collectionName, message, sessionWrapper);
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
-      // wait for a while until we see the shard
-      ocmh.waitForNewShard(collectionName, sliceName);
-
-      String async = message.getStr(ASYNC);
-      countDownLatch = new SolrCloseableLatch(positions.size(), ocmh);
-      for (ReplicaPosition position : positions) {
-        String nodeName = position.node;
-        String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(), collection, sliceName, position.type);
-        log.info("Creating replica " + coreName + " as part of slice " + sliceName + " of collection " + collectionName
-            + " on " + nodeName);
-
-        // Need to create new params for each request
-        ZkNodeProps addReplicasProps = new ZkNodeProps(
-            COLLECTION_PROP, collectionName,
-            SHARD_ID_PROP, sliceName,
-            ZkStateReader.REPLICA_TYPE, position.type.name(),
-            CoreAdminParams.NODE, nodeName,
-            CoreAdminParams.NAME, coreName,
-            CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-        Map<String, Object> propertyParams = new HashMap<>();
-        ocmh.addPropertyParams(message, propertyParams);
-        addReplicasProps = addReplicasProps.plus(propertyParams);
-        if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
-        final NamedList addResult = new NamedList();
-        ocmh.addReplica(zkStateReader.getClusterState(), addReplicasProps, addResult, () -> {
-          countDownLatch.countDown();
-          Object addResultFailure = addResult.get("failure");
-          if (addResultFailure != null) {
-            SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
-            if (failure == null) {
-              failure = new SimpleOrderedMap();
-              results.add("failure", failure);
-            }
-            failure.addAll((NamedList) addResultFailure);
-          } else {
-            SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
-            if (success == null) {
-              success = new SimpleOrderedMap();
-              results.add("success", success);
-            }
-            success.addAll((NamedList) addResult.get("success"));
-          }
-        });
-      }
-    } finally {
-      if (sessionWrapper.get() != null) sessionWrapper.get().release();
-    }
-
-    log.debug("Waiting for create shard action to complete");
-    countDownLatch.await(5, TimeUnit.MINUTES);
-    log.debug("Finished waiting for create shard action to complete");
-
-    log.info("Finished create command on all shards for collection: " + collectionName);
-
-  }
-
-  public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
-         String collectionName, ZkNodeProps message, AtomicReference< PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
-    String sliceName = message.getStr(SHARD_ID_PROP);
-    DocCollection collection = clusterState.getCollection(collectionName);
-
     int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, collection.getInt(NRT_REPLICAS, collection.getInt(REPLICATION_FACTOR, 1))));
     int numPullReplicas = message.getInt(PULL_REPLICAS, collection.getInt(PULL_REPLICAS, 0));
     int numTlogReplicas = message.getInt(TLOG_REPLICAS, collection.getInt(TLOG_REPLICAS, 0));
-    int totalReplicas = numNrtReplicas + numPullReplicas + numTlogReplicas;
 
     if (numNrtReplicas + numTlogReplicas <= 0) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
     }
 
-    Object createNodeSetStr = message.get(OverseerCollectionMessageHandler.CREATE_NODE_SET);
-
-    boolean usePolicyFramework = CloudUtil.usePolicyFramework(collection, cloudManager);
-    List<ReplicaPosition> positions;
-    if (usePolicyFramework) {
-      if (collection.getPolicyName() != null) message.getProperties().put(Policy.POLICY, collection.getPolicyName());
-      positions = Assign.identifyNodes(cloudManager,
-          clusterState,
-          Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM),
-          collection.getName(),
-          message,
-          Collections.singletonList(sliceName),
-          numNrtReplicas,
-          numTlogReplicas,
-          numPullReplicas);
-      sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
-    } else {
-      List<Assign.ReplicaCount> sortedNodeList = Assign.getNodesForNewReplicas(clusterState, collection.getName(), sliceName, totalReplicas,
-          createNodeSetStr, cloudManager);
-      int i = 0;
-      positions = new ArrayList<>();
-      for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
-          Replica.Type.TLOG, numTlogReplicas,
-          Replica.Type.PULL, numPullReplicas
-      ).entrySet()) {
-        for (int j = 0; j < e.getValue(); j++) {
-          positions.add(new ReplicaPosition(sliceName, j + 1, e.getKey(), sortedNodeList.get(i % sortedNodeList.size()).nodeName));
-          i++;
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
+    // wait for a while until we see the shard
+    ocmh.waitForNewShard(collectionName, sliceName);
+    String async = message.getStr(ASYNC);
+    ZkNodeProps addReplicasProps = new ZkNodeProps(
+        COLLECTION_PROP, collectionName,
+        SHARD_ID_PROP, sliceName,
+        ZkStateReader.NRT_REPLICAS, String.valueOf(numNrtReplicas),
+        ZkStateReader.TLOG_REPLICAS, String.valueOf(numTlogReplicas),
+        ZkStateReader.PULL_REPLICAS, String.valueOf(numPullReplicas),
+        OverseerCollectionMessageHandler.CREATE_NODE_SET, message.getStr(OverseerCollectionMessageHandler.CREATE_NODE_SET),
+        CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+
+    Map<String, Object> propertyParams = new HashMap<>();
+    ocmh.addPropertyParams(message, propertyParams);
+    addReplicasProps = addReplicasProps.plus(propertyParams);
+    if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
+    final NamedList addResult = new NamedList();
+    try {
+      ocmh.addReplica(zkStateReader.getClusterState(), addReplicasProps, addResult, () -> {
+        Object addResultFailure = addResult.get("failure");
+        if (addResultFailure != null) {
+          SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
+          if (failure == null) {
+            failure = new SimpleOrderedMap();
+            results.add("failure", failure);
+          }
+          failure.addAll((NamedList) addResultFailure);
+        } else {
+          SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
+          if (success == null) {
+            success = new SimpleOrderedMap();
+            results.add("success", success);
+          }
+          success.addAll((NamedList) addResult.get("success"));
         }
-      }
+      });
+    } catch (Assign.AssignmentException e) {
+      // clean up the slice that we created
+      ZkNodeProps deleteShard = new ZkNodeProps(COLLECTION_PROP, collectionName, SHARD_ID_PROP, sliceName, ASYNC, async);
+      new DeleteShardCmd(ocmh).call(clusterState, deleteShard, results);
+      throw e;
     }
-    return positions;
+
+    log.info("Finished create command on all shards for collection: " + collectionName);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
index 2df0f77..6071b1b 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
@@ -268,7 +268,7 @@ public class MoveReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
     NamedList addResult = new NamedList();
     SolrCloseableLatch countDownLatch = new SolrCloseableLatch(1, ocmh);
     ActiveReplicaWatcher watcher = null;
-    ZkNodeProps props = ocmh.addReplica(clusterState, addReplicasProps, addResult, null);
+    ZkNodeProps props = ocmh.addReplica(clusterState, addReplicasProps, addResult, null).get(0);
     log.debug("props " + props);
     if (replica.equals(slice.getLeader()) || waitForFinalState) {
       watcher = new ActiveReplicaWatcher(coll.getName(), null, Collections.singletonList(newCoreName), countDownLatch);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
index e15c389..a724bc7 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
@@ -709,7 +709,7 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler,
     }
   }
 
-  ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
+  List<ZkNodeProps> addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
       throws Exception {
 
     return ((AddReplicaCmd) commandMap.get(ADDREPLICA)).addReplica(clusterState, message, results, onComplete);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
index d08b519..a09eec3 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
@@ -140,7 +140,7 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
                 log.debug("Successfully created replica for collection={} shard={} on node={}",
                     sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
               }
-            });
+            }).get(0);
 
         if (addedReplica != null) {
           createdReplicas.add(addedReplica);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 3a46b2b..aef2448 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -694,6 +694,9 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
         throw new SolrException(ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
       copy(req.getParams(), map,
           REPLICATION_FACTOR,
+          NRT_REPLICAS,
+          TLOG_REPLICAS,
+          PULL_REPLICAS,
           CREATE_NODE_SET,
           WAIT_FOR_FINAL_STATE);
       return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
@@ -828,7 +831,11 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
           DATA_DIR,
           ULOG_DIR,
           REPLICA_TYPE,
-          WAIT_FOR_FINAL_STATE);
+          WAIT_FOR_FINAL_STATE,
+          NRT_REPLICAS,
+          TLOG_REPLICAS,
+          PULL_REPLICAS,
+          CREATE_NODE_SET);
       return copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
     }),
     OVERSEERSTATUS_OP(OVERSEERSTATUS, (req, rsp, h) -> (Map) new LinkedHashMap<>()),

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java
index e338cc2..342a27d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java
@@ -17,7 +17,10 @@
 package org.apache.solr.cloud;
 
 import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.EnumSet;
+import java.util.List;
 
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -26,11 +29,15 @@ import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.util.LogLevel;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.solr.client.solrj.response.RequestStatusState.COMPLETED;
+import static org.apache.solr.client.solrj.response.RequestStatusState.FAILED;
+
 /**
  *
  */
@@ -45,10 +52,75 @@ public class AddReplicaTest extends SolrCloudTestCase {
         .configure();
   }
 
+  @Before
+  public void setUp() throws Exception  {
+    super.setUp();
+    cluster.deleteAllCollections();
+  }
+
+  @Test
+  public void testAddMultipleReplicas() throws Exception  {
+    cluster.waitForAllNodes(5);
+    String collection = "testAddMultipleReplicas";
+    CloudSolrClient cloudClient = cluster.getSolrClient();
+
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collection, "conf1", 1, 1);
+    create.setMaxShardsPerNode(2);
+    cloudClient.request(create);
+
+    CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
+        .setNrtReplicas(1)
+        .setTlogReplicas(1)
+        .setPullReplicas(1);
+    RequestStatusState status = addReplica.processAndWait(collection + "_xyz1", cloudClient, 120);
+    assertEquals(COMPLETED, status);
+    DocCollection docCollection = cloudClient.getZkStateReader().getClusterState().getCollectionOrNull(collection);
+    assertNotNull(docCollection);
+    assertEquals(4, docCollection.getReplicas().size());
+    assertEquals(2, docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
+    assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
+    assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
+
+    // try to add 5 more replicas which should fail because numNodes(4)*maxShardsPerNode(2)=8 and 4 replicas already exist
+    addReplica = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
+        .setNrtReplicas(3)
+        .setTlogReplicas(1)
+        .setPullReplicas(1);
+    status = addReplica.processAndWait(collection + "_xyz1", cloudClient, 120);
+    assertEquals(FAILED, status);
+    docCollection = cloudClient.getZkStateReader().getClusterState().getCollectionOrNull(collection);
+    assertNotNull(docCollection);
+    // sanity check that everything is as before
+    assertEquals(4, docCollection.getReplicas().size());
+    assertEquals(2, docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
+    assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
+    assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
+
+    // but adding any number of replicas is supported if an explicit create node set is specified
+    // so test that as well
+    List<String> createNodeSet = new ArrayList<>(2);
+    createNodeSet.add(cluster.getRandomJetty(random()).getNodeName());
+    createNodeSet.add(cluster.getRandomJetty(random()).getNodeName());
+    addReplica = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
+        .setNrtReplicas(3)
+        .setTlogReplicas(1)
+        .setPullReplicas(1)
+        .setCreateNodeSet(String.join(",", createNodeSet));
+    status = addReplica.processAndWait(collection + "_xyz1", cloudClient, 120);
+    assertEquals(COMPLETED, status);
+    docCollection = cloudClient.getZkStateReader().getClusterState().getCollectionOrNull(collection);
+    assertNotNull(docCollection);
+    // sanity check that everything is as before
+    assertEquals(9, docCollection.getReplicas().size());
+    assertEquals(5, docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
+    assertEquals(2, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
+    assertEquals(2, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
+  }
+
   @Test
   //commented 2-Aug-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 09-Apr-2018
   public void test() throws Exception {
-    cluster.waitForAllNodes(5000);
+    cluster.waitForAllNodes(5);
     String collection = "addreplicatest_coll";
 
     CloudSolrClient cloudClient = cluster.getSolrClient();
@@ -65,16 +137,16 @@ public class AddReplicaTest extends SolrCloudTestCase {
     addReplica.processAsync("000", cloudClient);
     CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus("000");
     CollectionAdminRequest.RequestStatusResponse rsp = requestStatus.process(cloudClient);
-    assertTrue(rsp.getRequestStatus() != RequestStatusState.COMPLETED);
+    assertNotSame(rsp.getRequestStatus(), COMPLETED);
     // wait for async request success
     boolean success = false;
     for (int i = 0; i < 200; i++) {
       rsp = requestStatus.process(cloudClient);
-      if (rsp.getRequestStatus() == RequestStatusState.COMPLETED) {
+      if (rsp.getRequestStatus() == COMPLETED) {
         success = true;
         break;
       }
-      assertFalse(rsp.toString(), rsp.getRequestStatus() == RequestStatusState.FAILED);
+      assertNotSame(rsp.toString(), rsp.getRequestStatus(), RequestStatusState.FAILED);
       Thread.sleep(500);
     }
     assertTrue(success);
@@ -82,23 +154,23 @@ public class AddReplicaTest extends SolrCloudTestCase {
     replicas2.removeAll(replicas);
     assertEquals(1, replicas2.size());
     Replica r = replicas2.iterator().next();
-    assertTrue(r.toString(), r.getState() != Replica.State.ACTIVE);
+    assertNotSame(r.toString(), r.getState(), Replica.State.ACTIVE);
 
     // use waitForFinalState
     addReplica.setWaitForFinalState(true);
     addReplica.processAsync("001", cloudClient);
     requestStatus = CollectionAdminRequest.requestStatus("001");
     rsp = requestStatus.process(cloudClient);
-    assertTrue(rsp.getRequestStatus() != RequestStatusState.COMPLETED);
+    assertNotSame(rsp.getRequestStatus(), COMPLETED);
     // wait for async request success
     success = false;
     for (int i = 0; i < 200; i++) {
       rsp = requestStatus.process(cloudClient);
-      if (rsp.getRequestStatus() == RequestStatusState.COMPLETED) {
+      if (rsp.getRequestStatus() == COMPLETED) {
         success = true;
         break;
       }
-      assertFalse(rsp.toString(), rsp.getRequestStatus() == RequestStatusState.FAILED);
+      assertNotSame(rsp.toString(), rsp.getRequestStatus(), RequestStatusState.FAILED);
       Thread.sleep(500);
     }
     assertTrue(success);
@@ -114,7 +186,7 @@ public class AddReplicaTest extends SolrCloudTestCase {
       if (replica.getName().equals(replica2)) {
         continue; // may be still recovering
       }
-      assertTrue(coll.toString() + "\n" + replica.toString(), replica.getState() == Replica.State.ACTIVE);
+      assertSame(coll.toString() + "\n" + replica.toString(), replica.getState(), Replica.State.ACTIVE);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
index f68fa9e..09a119b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
@@ -158,7 +158,7 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
     assertTrue("Should have gotten the right error message back",
         e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
 
-    // And finally, insure that there are all the replcias we expect. We should have shards 1, 2 and 4 and each
+    // And finally, ensure that there are all the replicas we expect. We should have shards 1, 2 and 4 and each
     // should have exactly two replicas
     waitForState("Expected shards shardstart, 1, 2 and 4, each with two active replicas", collectionName, (n, c) -> {
       return DocCollection.isFullyActive(n, c, 4, 2);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
index 39ae618..cd87bb5 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
@@ -542,7 +542,7 @@ public class ShardSplitTest extends AbstractFullDistribZkTestBase {
 
   @Test
   public void testSplitShardWithRule() throws Exception {
-    doSplitShardWithRule(SolrIndexSplitter.SplitMethod.LINK);
+    doSplitShardWithRule(SolrIndexSplitter.SplitMethod.REWRITE);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index 17b56d7..4b73200 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -413,23 +414,46 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     ClusterState clusterState = getClusterState();
     DocCollection coll = clusterState.getCollection(message.getStr(ZkStateReader.COLLECTION_PROP));
     AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-    message = AddReplicaCmd.assignReplicaDetails(cloudManager, clusterState, message, sessionWrapper);
+
+    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
+    EnumMap<Replica.Type, Integer> replicaTypesVsCount = new EnumMap<>(Replica.Type.class);
+    replicaTypesVsCount.put(Replica.Type.NRT, message.getInt(NRT_REPLICAS, replicaType == Replica.Type.NRT ? 1 : 0));
+    replicaTypesVsCount.put(Replica.Type.TLOG, message.getInt(TLOG_REPLICAS, replicaType == Replica.Type.TLOG ? 1 : 0));
+    replicaTypesVsCount.put(Replica.Type.PULL, message.getInt(PULL_REPLICAS, replicaType == Replica.Type.PULL ? 1 : 0));
+
+    int totalReplicas = 0;
+    for (Map.Entry<Replica.Type, Integer> entry : replicaTypesVsCount.entrySet()) {
+      totalReplicas += entry.getValue();
+    }
+    if (totalReplicas > 1)  {
+      if (message.getStr(CoreAdminParams.NAME) != null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create " + totalReplicas + " replicas if 'name' parameter is specified");
+      }
+      if (message.getStr(CoreAdminParams.CORE_NODE_NAME) != null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create " + totalReplicas + " replicas if 'coreNodeName' parameter is specified");
+      }
+    }
+
+    List<ReplicaPosition> replicaPositions = AddReplicaCmd.buildReplicaPositions(cloudManager, clusterState, coll.getName(), message, replicaTypesVsCount, sessionWrapper);
+    for (ReplicaPosition replicaPosition : replicaPositions) {
+      AddReplicaCmd.CreateReplica createReplica = AddReplicaCmd.assignReplicaDetails(cloudManager, clusterState, message, replicaPosition);
+      if (message.getStr(CoreAdminParams.CORE_NODE_NAME) == null) {
+        createReplica.coreNodeName = Assign.assignCoreNodeName(stateManager, coll);
+      }
+      ReplicaInfo ri = new ReplicaInfo(
+          createReplica.coreNodeName,
+          createReplica.coreName,
+          createReplica.collectionName,
+          createReplica.sliceName,
+          createReplica.replicaType,
+          createReplica.node,
+          message.getProperties()
+      );
+      simAddReplica(ri.getNode(), ri, true);
+    }
     if (sessionWrapper.get() != null) {
       sessionWrapper.get().release();
     }
-    if (message.getStr(CoreAdminParams.CORE_NODE_NAME) == null) {
-      message = message.plus(CoreAdminParams.CORE_NODE_NAME, Assign.assignCoreNodeName(stateManager, coll));
-    }
-    ReplicaInfo ri = new ReplicaInfo(
-        message.getStr(CoreAdminParams.CORE_NODE_NAME),
-        message.getStr(CoreAdminParams.NAME),
-        message.getStr(ZkStateReader.COLLECTION_PROP),
-        message.getStr(ZkStateReader.SHARD_ID_PROP),
-        Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT)),
-        message.getStr(CoreAdminParams.NODE),
-        message.getProperties()
-    );
-    simAddReplica(message.getStr(CoreAdminParams.NODE), ri, true);
     results.add("success", "");
   }
 
@@ -1015,31 +1039,30 @@ public class SimClusterStateProvider implements ClusterStateProvider {
           .filter(e -> !e.getKey().equals("replicas"))
           .forEach(e -> props.put(e.getKey(), e.getValue()));
       // 2. create new replicas
-      AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-      List<ReplicaPosition> positions = CreateShardCmd.buildReplicaPositions(cloudManager, clusterState, collectionName,
-          message, sessionWrapper);
-      if (sessionWrapper.get() != null) {
-        sessionWrapper.get().release();
-      }
-      AtomicInteger replicaNum = new AtomicInteger(1);
-      positions.forEach(pos -> {
-        Map<String, Object> replicaProps = new HashMap<>();
-        replicaProps.put(ZkStateReader.SHARD_ID_PROP, pos.shard);
-        replicaProps.put(ZkStateReader.NODE_NAME_PROP, pos.node);
-        replicaProps.put(ZkStateReader.REPLICA_TYPE, pos.type.toString());
-        replicaProps.put(ZkStateReader.BASE_URL_PROP, Utils.getBaseUrlForNodeName(pos.node, "http"));
-        String coreName = String.format(Locale.ROOT, "%s_%s_replica_%s%s", collectionName, pos.shard, pos.type.name().substring(0,1).toLowerCase(Locale.ROOT),
-            replicaNum.getAndIncrement());
+      EnumMap<Replica.Type, Integer> replicaTypesVsCount = new EnumMap<>(Replica.Type.class);
+      int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, collection.getInt(NRT_REPLICAS, collection.getInt(REPLICATION_FACTOR, 1))));
+      int numTlogReplicas = message.getInt(TLOG_REPLICAS, message.getInt(TLOG_REPLICAS, collection.getInt(TLOG_REPLICAS, 0)));
+      int numPullReplicas = message.getInt(PULL_REPLICAS, message.getInt(PULL_REPLICAS, collection.getInt(PULL_REPLICAS, 0)));
+      replicaTypesVsCount.put(Replica.Type.NRT, numNrtReplicas);
+      replicaTypesVsCount.put(Replica.Type.TLOG, numTlogReplicas);
+      replicaTypesVsCount.put(Replica.Type.PULL, numPullReplicas);
+
+      ZkNodeProps addReplicasProps = new ZkNodeProps(
+          COLLECTION_PROP, collectionName,
+          SHARD_ID_PROP, sliceName,
+          ZkStateReader.NRT_REPLICAS, String.valueOf(replicaTypesVsCount.get(Replica.Type.NRT)),
+          ZkStateReader.TLOG_REPLICAS, String.valueOf(replicaTypesVsCount.get(Replica.Type.TLOG)),
+          ZkStateReader.PULL_REPLICAS, String.valueOf(replicaTypesVsCount.get(Replica.Type.PULL)),
+          OverseerCollectionMessageHandler.CREATE_NODE_SET, message.getStr(OverseerCollectionMessageHandler.CREATE_NODE_SET)
+          );
+
         try {
-          replicaProps.put(ZkStateReader.CORE_NAME_PROP, coreName);
-          ReplicaInfo ri = new ReplicaInfo("core_node" + Assign.incAndGetId(stateManager, collectionName, 0),
-              coreName, collectionName, pos.shard, pos.type, pos.node, replicaProps);
-          simAddReplica(pos.node, ri, false);
+          simAddReplica(addReplicasProps, results);
         } catch (Exception e) {
           throw new RuntimeException(e);
         }
-      });
-      Map<String, Object> colProps = collProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>());
+
+      collProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>());
 
       simRunLeaderElection(Collections.singleton(collectionName), true);
       results.add("success", "");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
index c964e44..3637428 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
@@ -236,7 +236,7 @@ public class TestSimPolicyCloud extends SimSolrCloudTestCase {
     String pullNodeName = nodes.get(1);
     int pullPort = (Integer)cluster.getSimNodeStateProvider().simGetNodeValue(pullNodeName, ImplicitSnitch.PORT);
 
-    String tlogNodeName = nodes.get(1);
+    String tlogNodeName = nodes.get(2);
     int tlogPort = (Integer)cluster.getSimNodeStateProvider().simGetNodeValue(tlogNodeName, ImplicitSnitch.PORT);
     log.info("NRT {} PULL {} , TLOG {} ", nrtNodeName, pullNodeName, tlogNodeName);
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/solr-ref-guide/src/collections-api.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/collections-api.adoc b/solr/solr-ref-guide/src/collections-api.adoc
index e4230dd..d601069 100644
--- a/solr/solr-ref-guide/src/collections-api.adoc
+++ b/solr/solr-ref-guide/src/collections-api.adoc
@@ -396,6 +396,10 @@ Use SPLITSHARD for collections created with the 'compositeId' router (`router.ke
 
 `/admin/collections?action=CREATESHARD&shard=_shardName_&collection=_name_`
 
+The default values for `replicationFactor` or `nrtReplicas`, `tlogReplicas`, `pullReplicas` from the collection are used to determine the number of replicas to be created for the new shard. This can be customized by explicitly passing the corresponding parameters to the request.
+
+The API uses the Autoscaling framework to find the best possible nodes in the cluster when an Autoscaling preferences or policy is configured. Refer to <<solrcloud-autoscaling-policy-preferences.adoc#solrcloud-autoscaling-policy-preferences,Autoscaling Policy and Preferences>> section for more details.
+
 === CREATESHARD Parameters
 
 `collection`::
@@ -409,6 +413,15 @@ Allows defining the nodes to spread the new collection across. If not provided,
 +
 The format is a comma-separated list of node_names, such as `localhost:8983_solr,localhost:8984_solr,localhost:8985_solr`.
 
+`nrtReplicas`::
+The number of `nrt` replicas that should be created for the new shard (optional, the default for the collection is used if omitted)
+
+`tlogReplicas`::
+The number of `tlog` replicas that should be created for the new shard (optional, the default for the collection is used if omitted)
+
+`pullReplicas`::
+The number of `pull` replicas that should be created for the new shard (optional, the default for the collection is used if omitted)
+
 `property._name_=_value_`::
 Set core property _name_ to _value_. See the section <<defining-core-properties.adoc#defining-core-properties,Defining core.properties>> for details on supported properties and values.
 
@@ -1016,9 +1029,9 @@ http://localhost:8983/solr/admin/collections?action=DELETEREPLICA&collection=tes
 [[addreplica]]
 == ADDREPLICA: Add Replica
 
-Add a replica to a shard in a collection. The node name can be specified if the replica is to be created in a specific node.
+Add one or more replicas to a shard in a collection. The node name can be specified if the replica is to be created in a specific node. Otherwise, a set of nodes can be specified and the most suitable ones among them will be chosen to create the replica(s).
 
-The API uses the Autoscaling framework to find nodes that can satisfy the disk requirements for the new replica but only when an Autoscaling policy is configured. Refer to <<solrcloud-autoscaling-policy-preferences.adoc#solrcloud-autoscaling-policy-preferences,Autoscaling Policy and Preferences>> section for more details.
+The API uses the Autoscaling framework to find nodes that can satisfy the disk requirements for the new replica(s) but only when an Autoscaling preferences or policy is configured. Refer to <<solrcloud-autoscaling-policy-preferences.adoc#solrcloud-autoscaling-policy-preferences,Autoscaling Policy and Preferences>> section for more details.
 
 `/admin/collections?action=ADDREPLICA&collection=_collection_&shard=_shard_&node=_nodeName_`
 
@@ -1038,7 +1051,14 @@ If the exact shard name is not known, users may pass the `\_route_` value and th
 Ignored if the `shard` parameter is also specified.
 
 `node`::
-The name of the node where the replica should be created.
+The name of the node where the replica should be created (optional)
+
+`createNodeSet`::
+A comma-separated list of nodes among which the best ones will be chosen to place the replicas (optional)
++
+The format is a comma-separated list of node_names, such as `localhost:8983_solr,localhost:8984_solr,localhost:8985_solr`.
+
+If neither `node` nor `createNodeSet` is specified then the best node(s) from among all the live nodes in the cluster are chosen.
 
 `instanceDir`::
 The instanceDir for the core that will be created.
@@ -1057,6 +1077,15 @@ The type of replica to create. These possible values are allowed:
 +
 See the section <<shards-and-indexing-data-in-solrcloud.adoc#types-of-replicas,Types of Replicas>> for more information about replica type options.
 
+`nrtReplicas`::
+The number of `nrt` replicas that should be created (optional, defaults to 1 if `type` is `nrt` otherwise 0).
+
+`tlogReplicas`::
+The number of `tlog` replicas that should be created (optional, defaults to 1 if `type` is `tlog` otherwise 0).
+
+`pullReplicas`::
+The number of `pull` replicas that should be created (optional, defaults to 1 if `type` is `pull` otherwise 0).
+
 `property._name_=_value_`::
 Set core property _name_ to _value_. See <<defining-core-properties.adoc#defining-core-properties,Defining core.properties>> for details about supported properties and values.
 
@@ -1096,6 +1125,39 @@ http://localhost:8983/solr/admin/collections?action=ADDREPLICA&collection=test2&
 </response>
 ----
 
+[source,text]
+----
+http://localhost:8983/solr/admin/collections?action=addreplica&collection=gettingstarted&shard=shard1&tlogReplicas=1&pullReplicas=1
+----
+
+*Output*
+
+[source,json]
+----
+{
+    "responseHeader": {
+        "status": 0,
+        "QTime": 784
+    },
+    "success": {
+        "127.0.1.1:7574_solr": {
+            "responseHeader": {
+                "status": 0,
+                "QTime": 257
+            },
+            "core": "gettingstarted_shard1_replica_p11"
+        },
+        "127.0.1.1:8983_solr": {
+            "responseHeader": {
+                "status": 0,
+                "QTime": 295
+            },
+            "core": "gettingstarted_shard1_replica_t10"
+        }
+    }
+}
+----
+
 [[clusterprop]]
 == CLUSTERPROP: Cluster Properties
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4bcace57/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
index 4cf68d4..0f6de19 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
@@ -57,7 +57,10 @@ import static org.apache.solr.common.cloud.DocCollection.RULE;
 import static org.apache.solr.common.cloud.DocCollection.SNITCH;
 import static org.apache.solr.common.cloud.ZkStateReader.AUTO_ADD_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
 import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
 import static org.apache.solr.common.params.CollectionAdminParams.COLOCATED_WITH;
 import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
@@ -1648,6 +1651,8 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
     protected String ulogDir;
     protected Properties properties;
     protected Replica.Type type;
+    protected Integer nrtReplicas, tlogReplicas, pullReplicas;
+    protected String createNodeSet;
 
     private AddReplica(String collection, String shard, String routeKey, Replica.Type type) {
       super(CollectionAction.ADDREPLICA);
@@ -1727,6 +1732,42 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
       return shard;
     }
 
+    public Integer getNrtReplicas() {
+      return nrtReplicas;
+    }
+
+    public AddReplica setNrtReplicas(Integer nrtReplicas) {
+      this.nrtReplicas = nrtReplicas;
+      return this;
+    }
+
+    public Integer getTlogReplicas() {
+      return tlogReplicas;
+    }
+
+    public AddReplica setTlogReplicas(Integer tlogReplicas) {
+      this.tlogReplicas = tlogReplicas;
+      return this;
+    }
+
+    public Integer getPullReplicas() {
+      return pullReplicas;
+    }
+
+    public AddReplica setPullReplicas(Integer pullReplicas) {
+      this.pullReplicas = pullReplicas;
+      return this;
+    }
+
+    public String getCreateNodeSet() {
+      return createNodeSet;
+    }
+
+    public AddReplica setCreateNodeSet(String createNodeSet) {
+      this.createNodeSet = createNodeSet;
+      return this;
+    }
+
     @Override
     public SolrParams getParams() {
       ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
@@ -1759,6 +1800,18 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
       if (properties != null) {
         addProperties(params, properties);
       }
+      if (nrtReplicas != null)  {
+        params.add(NRT_REPLICAS, String.valueOf(nrtReplicas));
+      }
+      if (tlogReplicas != null)  {
+        params.add(TLOG_REPLICAS, String.valueOf(tlogReplicas));
+      }
+      if (pullReplicas != null)  {
+        params.add(PULL_REPLICAS, String.valueOf(pullReplicas));
+      }
+      if (createNodeSet != null)  {
+        params.add(CREATE_NODE_SET_PARAM, createNodeSet);
+      }
       return params;
     }
 


[06/29] lucene-solr:jira/http2: SOLR-11836: Move CHANGES entry to the 7.6 section

Posted by da...@apache.org.
SOLR-11836: Move CHANGES entry to the 7.6 section


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/60569fbe
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/60569fbe
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/60569fbe

Branch: refs/heads/jira/http2
Commit: 60569fbe4e8e9f9f9d57da9bf2570f847a26965c
Parents: 4bcace5
Author: Varun Thacker <va...@apache.org>
Authored: Fri Sep 21 17:01:37 2018 -0700
Committer: Varun Thacker <va...@apache.org>
Committed: Fri Sep 21 17:01:37 2018 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/60569fbe/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index ee1d7b7..0f97dcf 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -101,6 +101,12 @@ Other Changes
 * SOLR-12762: Fix javadoc for SolrCloudTestCase.clusterShape() method and add a method that validates only against
   Active slices (Anshum Gupta)
 
+Bug Fixes
+----------------------
+
+* SOLR-11836: FacetStream works with bucketSizeLimit of -1 which will fetch all the buckets.
+  (Alfonso Muñoz-Pomer Fuentes, Amrit Sarkar via Varun Thacker)
+
 
 ==================  7.5.0 ==================
 
@@ -357,9 +363,6 @@ Bug Fixes
 
 * SOLR-12733: SolrMetricReporterTest failure (Erick Erickson, David Smiley)
 
-* SOLR-11836: FacetStream works with bucketSizeLimit of -1 which will fetch all the buckets.
-  (Alfonso Muñoz-Pomer Fuentes, Amrit Sarkar via Varun Thacker)
-
 * SOLR-12765: Incorrect format of JMX cache stats. (Bojan Smid, ab)
 
 Optimizations


[26/29] lucene-solr:jira/http2: SOLR-12709: Add TestSimExtremeIndexing for testing simulated large indexing jobs. Several important improvements to the simulator.

Posted by da...@apache.org.
SOLR-12709: Add TestSimExtremeIndexing for testing simulated large indexing jobs.
Several important improvements to the simulator.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2369c896
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2369c896
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2369c896

Branch: refs/heads/jira/http2
Commit: 2369c8963412773592098475bdd8af1da81e3ac5
Parents: c587410
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Thu Sep 27 12:12:54 2018 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Thu Sep 27 14:49:43 2018 +0200

----------------------------------------------------------------------
 .../cloud/autoscaling/ComputePlanAction.java    |   8 +-
 .../cloud/autoscaling/IndexSizeTrigger.java     |  12 +-
 .../apache/solr/metrics/SolrMetricManager.java  |   2 +-
 .../org/apache/solr/cloud/CloudTestUtils.java   |   6 +-
 .../cloud/autoscaling/IndexSizeTriggerTest.java |  10 +-
 .../cloud/autoscaling/sim/SimCloudManager.java  |  66 +-
 .../sim/SimClusterStateProvider.java            | 680 ++++++++++++++-----
 .../autoscaling/sim/SimDistribStateManager.java |   7 +
 .../autoscaling/sim/SimNodeStateProvider.java   |  21 +-
 .../sim/TestSimExecutePlanAction.java           |   4 +-
 .../autoscaling/sim/TestSimExtremeIndexing.java | 163 +++++
 .../autoscaling/sim/TestSimNodeLostTrigger.java |   2 +-
 .../autoscaling/sim/TestSimPolicyCloud.java     |  10 +-
 .../sim/TestSimTriggerIntegration.java          |   8 +-
 .../client/solrj/cloud/autoscaling/Policy.java  |  35 +-
 .../solrj/cloud/autoscaling/PolicyHelper.java   |   4 +-
 .../solrj/cloud/autoscaling/ReplicaInfo.java    |   4 +-
 17 files changed, 808 insertions(+), 234 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
index 6bad63d..7103bf5 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
@@ -136,7 +136,7 @@ public class ComputePlanAction extends TriggerActionBase {
               continue;
             }
           }
-          log.info("Computed Plan: {}", operation.getParams());
+          log.debug("Computed Plan: {}", operation.getParams());
           if (!collections.isEmpty()) {
             String coll = operation.getParams().get(CoreAdminParams.COLLECTION);
             if (coll != null && !collections.contains(coll)) {
@@ -175,7 +175,11 @@ public class ComputePlanAction extends TriggerActionBase {
     clusterState.forEachCollection(coll -> {
       Integer rf = coll.getReplicationFactor();
       if (rf == null) {
-        rf = coll.getReplicas().size() / coll.getSlices().size();
+        if (coll.getSlices().isEmpty()) {
+          rf = 1; // ???
+        } else {
+          rf = coll.getReplicas().size() / coll.getSlices().size();
+        }
       }
       totalRF.addAndGet(rf * coll.getSlices().size());
     });

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
index 3f2ea8a..6129cc7 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
@@ -26,6 +26,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -431,8 +432,15 @@ public class IndexSizeTrigger extends TriggerBase {
                           Map<String, List<ReplicaInfo>> belowSize) {
       super(TriggerEventType.INDEXSIZE, source, eventTime, null);
       properties.put(TriggerEvent.REQUESTED_OPS, ops);
-      properties.put(ABOVE_SIZE_PROP, aboveSize);
-      properties.put(BELOW_SIZE_PROP, belowSize);
+      // avoid passing very large amounts of data here - just use replica names
+      TreeMap<String, String> above = new TreeMap<>();
+      aboveSize.forEach((coll, replicas) ->
+          replicas.forEach(r -> above.put(r.getCore(), "docs=" + r.getVariable(DOCS_SIZE_PROP) + ", bytes=" + r.getVariable(BYTES_SIZE_PROP))));
+      properties.put(ABOVE_SIZE_PROP, above);
+      TreeMap<String, String> below = new TreeMap<>();
+      belowSize.forEach((coll, replicas) ->
+          replicas.forEach(r -> below.put(r.getCore(), "docs=" + r.getVariable(DOCS_SIZE_PROP) + ", bytes=" + r.getVariable(BYTES_SIZE_PROP))));
+      properties.put(BELOW_SIZE_PROP, below);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java b/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java
index f1b7923..e9cb111 100644
--- a/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java
+++ b/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java
@@ -791,7 +791,7 @@ public class SolrMetricManager {
    */
   public static String getRegistryName(SolrInfoBean.Group group, String... names) {
     String fullName;
-    String prefix = REGISTRY_NAME_PREFIX + group.toString() + ".";
+    String prefix = new StringBuilder(REGISTRY_NAME_PREFIX).append(group.name()).append('.').toString();
     // check for existing prefix and group
     if (names != null && names.length > 0 && names[0] != null && names[0].startsWith(prefix)) {
       // assume the first segment already was expanded

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java b/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
index b67b551..eb50b96 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
@@ -109,7 +109,7 @@ public class CloudTestUtils {
         log.trace("-- still not matching predicate: {}", state);
       }
     }
-    throw new TimeoutException("last state: " + coll);
+    throw new TimeoutException("last ClusterState: " + state + ", last coll state: " + coll);
   }
 
   /**
@@ -141,13 +141,13 @@ public class CloudTestUtils {
       }
       Collection<Slice> slices = withInactive ? collectionState.getSlices() : collectionState.getActiveSlices();
       if (slices.size() != expectedShards) {
-        log.trace("-- wrong number of active slices, expected={}, found={}", expectedShards, collectionState.getSlices().size());
+        log.trace("-- wrong number of slices, expected={}, found={}: {}", expectedShards, collectionState.getSlices().size(), collectionState.getSlices());
         return false;
       }
       Set<String> leaderless = new HashSet<>();
       for (Slice slice : slices) {
         int activeReplicas = 0;
-        if (requireLeaders && slice.getLeader() == null) {
+        if (requireLeaders && slice.getState() != Slice.State.INACTIVE && slice.getLeader() == null) {
           leaderless.add(slice.getName());
           continue;
         }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
index faabda1..fd93d03 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
@@ -93,7 +93,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
     configureCluster(2)
         .addConfig("conf", configset("cloud-minimal"))
         .configure();
-    if (random().nextBoolean() || true) {
+    if (random().nextBoolean()) {
       cloudManager = cluster.getJettySolrRunner(0).getCoreContainer().getZkController().getSolrCloudManager();
       solrClient = cluster.getSolrClient();
       loader = cluster.getJettySolrRunner(0).getCoreContainer().getResourceLoader();
@@ -190,7 +190,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
       assertNotNull("should have fired an event", ev);
       List<TriggerEvent.Op> ops = (List<TriggerEvent.Op>) ev.getProperty(TriggerEvent.REQUESTED_OPS);
       assertNotNull("should contain requestedOps", ops);
-      assertEquals("number of ops", 2, ops.size());
+      assertEquals("number of ops: " + ops, 2, ops.size());
       boolean shard1 = false;
       boolean shard2 = false;
       for (TriggerEvent.Op op : ops) {
@@ -361,7 +361,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
     CloudTestUtils.waitForState(cloudManager, "failed to create " + collectionName, collectionName,
         CloudTestUtils.clusterShape(2, 2, false, true));
 
-    for (int i = 0; i < 10; i++) {
+    for (int i = 0; i < 20; i++) {
       SolrInputDocument doc = new SolrInputDocument("id", "id-" + (i * 100));
       solrClient.add(collectionName, doc);
     }
@@ -412,7 +412,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
     assertEquals(response.get("result").toString(), "success");
 
     // delete some docs to trigger a merge
-    for (int i = 0; i < 5; i++) {
+    for (int i = 0; i < 15; i++) {
       solrClient.deleteById(collectionName, "id-" + (i * 100));
     }
     solrClient.commit(collectionName);
@@ -425,7 +425,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
         "}";
     req = createAutoScalingRequest(SolrRequest.METHOD.POST, resumeTriggerCommand);
     response = solrClient.request(req);
-    assertEquals(response.get("result").toString(), "success");
+    assertEquals("success", response.get("result").toString());
 
     timeSource.sleep(TimeUnit.MILLISECONDS.convert(waitForSeconds + 1, TimeUnit.SECONDS));
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
index 1f0b6cf..53e2c7e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
@@ -31,12 +31,14 @@ import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
+import com.carrotsearch.randomizedtesting.RandomizedContext;
 import com.codahale.metrics.jvm.ClassLoadingGaugeSet;
 import com.codahale.metrics.jvm.GarbageCollectorMetricSet;
 import com.codahale.metrics.jvm.MemoryUsageGaugeSet;
@@ -50,6 +52,7 @@ import org.apache.solr.client.solrj.cloud.DistribStateManager;
 import org.apache.solr.client.solrj.cloud.NodeStateProvider;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
+import org.apache.solr.client.solrj.cloud.autoscaling.Variable;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -118,6 +121,7 @@ public class SimCloudManager implements SolrCloudManager {
   private final String metricTag;
 
   private final List<SolrInputDocument> systemColl = Collections.synchronizedList(new ArrayList<>());
+  private final Map<String, Map<String, AtomicInteger>> eventCounts = new ConcurrentHashMap<>();
   private final MockSearchableSolrClient solrClient;
   private final Map<String, AtomicLong> opCounts = new ConcurrentSkipListMap<>();
 
@@ -129,9 +133,11 @@ public class SimCloudManager implements SolrCloudManager {
   private MetricsHandler metricsHandler;
   private MetricsHistoryHandler metricsHistoryHandler;
   private TimeSource timeSource;
+  private boolean useSystemCollection = true;
 
   private static int nodeIdPort = 10000;
-  public static int DEFAULT_DISK = 1024; // 1000 GiB
+  public static int DEFAULT_FREE_DISK = 1024; // 1000 GiB
+  public static int DEFAULT_TOTAL_DISK = 10240; // 10 TiB
   public static long DEFAULT_IDX_SIZE_BYTES = 10240; // 10 kiB
 
   /**
@@ -201,7 +207,14 @@ public class SimCloudManager implements SolrCloudManager {
               request = new QueryRequest(params);
             } else {
               // search request
-              return super.request(request, collection);
+              if (collection.equals(CollectionAdminParams.SYSTEM_COLL)) {
+                return super.request(request, collection);
+              } else {
+                // forward it
+                ModifiableSolrParams params = new ModifiableSolrParams(request.getParams());
+                params.set("collection", collection);
+                request = new QueryRequest(params);
+              }
             }
           } else {
             throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "when collection != null only UpdateRequest and QueryRequest are supported: request=" + request + ", collection=" + collection);
@@ -306,7 +319,8 @@ public class SimCloudManager implements SolrCloudManager {
     values.put(ImplicitSnitch.PORT, port);
     values.put(ImplicitSnitch.NODE, nodeId);
     values.put(ImplicitSnitch.CORES, 0);
-    values.put(ImplicitSnitch.DISK, DEFAULT_DISK);
+    values.put(ImplicitSnitch.DISK, DEFAULT_FREE_DISK);
+    values.put(Variable.Type.TOTALDISK.tagName, DEFAULT_TOTAL_DISK);
     values.put(ImplicitSnitch.SYSLOADAVG, 1.0);
     values.put(ImplicitSnitch.HEAPUSAGE, 123450000);
     values.put("sysprop.java.version", System.getProperty("java.version"));
@@ -353,7 +367,13 @@ public class SimCloudManager implements SolrCloudManager {
     Set<String> deadNodes = getSimNodeStateProvider().simGetDeadNodes();
     sb.append("## Dead nodes:\t\t" + deadNodes.size() + "\n");
     deadNodes.forEach(n -> sb.append("##\t\t" + n + "\n"));
-    sb.append("## Collections:\t" + getSimClusterStateProvider().simListCollections() + "\n");
+    sb.append("## Collections:\n");
+      clusterStateProvider.simGetCollectionStats().forEach((coll, stats) -> {
+        sb.append("##  * ").append(coll).append('\n');
+        stats.forEach((k, v) -> {
+          sb.append("##    " + k + "\t" + v + "\n");
+        });
+      });
     if (withCollections) {
       ClusterState state = clusterStateProvider.getClusterState();
       state.forEachCollection(coll -> sb.append(coll.toString() + "\n"));
@@ -386,6 +406,13 @@ public class SimCloudManager implements SolrCloudManager {
   }
 
   /**
+   * Get the source of randomness (usually initialized by the test suite).
+   */
+  public Random getRandom() {
+    return RandomizedContext.current().getRandom();
+  }
+
+  /**
    * Add a new node and initialize its node values (metrics). The
    * /live_nodes list is updated with the new node id.
    * @return new node id
@@ -448,6 +475,10 @@ public class SimCloudManager implements SolrCloudManager {
     }
   }
 
+  public void simSetUseSystemCollection(boolean useSystemCollection) {
+    this.useSystemCollection = useSystemCollection;
+  }
+
   /**
    * Clear the (simulated) .system collection.
    */
@@ -464,17 +495,7 @@ public class SimCloudManager implements SolrCloudManager {
   }
 
   public Map<String, Map<String, AtomicInteger>> simGetEventCounts() {
-    TreeMap<String, Map<String, AtomicInteger>> counts = new TreeMap<>();
-    synchronized (systemColl) {
-      for (SolrInputDocument d : systemColl) {
-        if (!"autoscaling_event".equals(d.getFieldValue("type"))) {
-          continue;
-        }
-        counts.computeIfAbsent((String)d.getFieldValue("event.source_s"), s -> new TreeMap<>())
-            .computeIfAbsent((String)d.getFieldValue("stage_s"), s -> new AtomicInteger())
-            .incrementAndGet();
-      }
-    }
+    TreeMap<String, Map<String, AtomicInteger>> counts = new TreeMap<>(eventCounts);
     return counts;
   }
 
@@ -705,6 +726,9 @@ public class SimCloudManager implements SolrCloudManager {
         rsp.setResponse(queryResponse.getValues());
         log.trace("-- response: {}", rsp);
         return rsp;
+      } else if (req instanceof QueryRequest) {
+        incrementCount("query");
+        return clusterStateProvider.simQuery((QueryRequest)req);
       }
     }
     if (req instanceof UpdateRequest) {
@@ -715,7 +739,17 @@ public class SimCloudManager implements SolrCloudManager {
       if (collection == null || collection.equals(CollectionAdminParams.SYSTEM_COLL)) {
         List<SolrInputDocument> docs = ureq.getDocuments();
         if (docs != null) {
-          systemColl.addAll(docs);
+          if (useSystemCollection) {
+            systemColl.addAll(docs);
+          }
+          for (SolrInputDocument d : docs) {
+            if (!"autoscaling_event".equals(d.getFieldValue("type"))) {
+              continue;
+            }
+            eventCounts.computeIfAbsent((String)d.getFieldValue("event.source_s"), s -> new TreeMap<>())
+                .computeIfAbsent((String)d.getFieldValue("stage_s"), s -> new AtomicInteger())
+                .incrementAndGet();
+          }
         }
         return new UpdateResponse();
       } else {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index 08ce6bf..18e20e0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -27,20 +27,25 @@ import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.Random;
 import java.util.Set;
+import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.ReentrantLock;
 
+import com.google.common.util.concurrent.AtomicDouble;
+import org.apache.commons.math3.stat.descriptive.SummaryStatistics;
 import org.apache.solr.client.solrj.cloud.DistribStateManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
 import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
@@ -51,7 +56,9 @@ import org.apache.solr.client.solrj.cloud.autoscaling.Variable;
 import org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type;
 import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
+import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.UpdateResponse;
 import org.apache.solr.cloud.ActionThrottle;
 import org.apache.solr.cloud.CloudTestUtils;
@@ -65,6 +72,7 @@ import org.apache.solr.cloud.api.collections.SplitShardCmd;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cloud.overseer.CollectionMutator;
 import org.apache.solr.cloud.overseer.ZkWriteCommand;
+import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
@@ -79,6 +87,7 @@ import org.apache.solr.common.cloud.rule.ImplicitSnitch;
 import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Utils;
@@ -118,11 +127,14 @@ public class SimClusterStateProvider implements ClusterStateProvider {
 
   public static final long DEFAULT_DOC_SIZE_BYTES = 500;
 
+  private static final String BUFFERED_UPDATES = "__buffered_updates__";
+
   private final LiveNodesSet liveNodes;
   private final SimDistribStateManager stateManager;
   private final SimCloudManager cloudManager;
 
   private final Map<String, List<ReplicaInfo>> nodeReplicaMap = new ConcurrentHashMap<>();
+  private final Map<String, Map<String, List<ReplicaInfo>>> colShardReplicaMap = new ConcurrentHashMap<>();
   private final Map<String, Object> clusterProperties = new ConcurrentHashMap<>();
   private final Map<String, Map<String, Object>> collProperties = new ConcurrentHashMap<>();
   private final Map<String, Map<String, Map<String, Object>>> sliceProperties = new ConcurrentHashMap<>();
@@ -145,6 +157,8 @@ public class SimClusterStateProvider implements ClusterStateProvider {
   private AtomicReference<Map<String, DocCollection>> collectionsStatesRef = new AtomicReference<>();
   private AtomicBoolean saveClusterState = new AtomicBoolean();
 
+  private Random bulkUpdateRandom = new Random(0);
+
   private transient boolean closed;
 
   /**
@@ -228,6 +242,14 @@ public class SimClusterStateProvider implements ClusterStateProvider {
 
   /**
    * Get random node id.
+   * @return one of the live nodes
+   */
+  public String simGetRandomNode() {
+    return simGetRandomNode(cloudManager.getRandom());
+  }
+
+  /**
+   * Get random node id.
    * @param random instance of random.
    * @return one of the live nodes
    */
@@ -506,12 +528,15 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       replicaInfo.getVariables().put(ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString());
       // add a property expected in Policy calculations, if missing
       if (replicaInfo.getVariable(Type.CORE_IDX.metricsAttribute) == null) {
-        replicaInfo.getVariables().put(Type.CORE_IDX.metricsAttribute, SimCloudManager.DEFAULT_IDX_SIZE_BYTES);
+        replicaInfo.getVariables().put(Type.CORE_IDX.metricsAttribute, new AtomicLong(SimCloudManager.DEFAULT_IDX_SIZE_BYTES));
         replicaInfo.getVariables().put(Variable.coreidxsize,
-            Type.CORE_IDX.convertVal(SimCloudManager.DEFAULT_IDX_SIZE_BYTES));
+            new AtomicDouble((Double)Type.CORE_IDX.convertVal(SimCloudManager.DEFAULT_IDX_SIZE_BYTES)));
       }
 
       replicas.add(replicaInfo);
+      colShardReplicaMap.computeIfAbsent(replicaInfo.getCollection(), c -> new ConcurrentHashMap<>())
+          .computeIfAbsent(replicaInfo.getShard(), s -> new ArrayList<>())
+          .add(replicaInfo);
 
       Map<String, Object> values = cloudManager.getSimNodeStateProvider().simGetAllNodeValues()
           .computeIfAbsent(nodeId, id -> new ConcurrentHashMap<>(SimCloudManager.createNodeValues(id)));
@@ -523,7 +548,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       cloudManager.getSimNodeStateProvider().simSetNodeValue(nodeId, ImplicitSnitch.CORES, cores + 1);
       Integer disk = (Integer)values.get(ImplicitSnitch.DISK);
       if (disk == null) {
-        disk = SimCloudManager.DEFAULT_DISK;
+        disk = SimCloudManager.DEFAULT_FREE_DISK;
       }
       cloudManager.getSimNodeStateProvider().simSetNodeValue(nodeId, ImplicitSnitch.DISK, disk - 1);
       // fake metrics
@@ -533,7 +558,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       cloudManager.getMetricManager().registry(registry).counter("UPDATE./update.requests");
       cloudManager.getMetricManager().registry(registry).counter("QUERY./select.requests");
       cloudManager.getMetricManager().registerGauge(null, registry,
-          () -> replicaInfo.getVariable(Type.CORE_IDX.metricsAttribute),
+          () -> ((Number)replicaInfo.getVariable(Type.CORE_IDX.metricsAttribute)).longValue(),
           "", true, "INDEX.sizeInBytes");
       // at this point nuke our cached DocCollection state
       collectionsStatesRef.set(null);
@@ -559,6 +584,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       for (int i = 0; i < replicas.size(); i++) {
         if (coreNodeName.equals(replicas.get(i).getName())) {
           ReplicaInfo ri = replicas.remove(i);
+          colShardReplicaMap.computeIfAbsent(ri.getCollection(), c -> new ConcurrentHashMap<>())
+              .computeIfAbsent(ri.getShard(), s -> new ArrayList<>())
+              .remove(ri);
           collectionsStatesRef.set(null);
 
           opDelay(ri.getCollection(), CollectionParams.CollectionAction.DELETEREPLICA.name());
@@ -598,6 +626,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       int version = oldData != null ? oldData.getVersion() : -1;
       Assert.assertEquals(clusterStateVersion, version + 1);
       stateManager.setData(ZkStateReader.CLUSTER_STATE, data, version);
+      log.debug("** saved cluster state version " + version);
       clusterStateVersion++;
     } catch (Exception e) {
       throw new IOException(e);
@@ -635,15 +664,22 @@ public class SimClusterStateProvider implements ClusterStateProvider {
         return;
       }
       dc.getSlices().forEach(s -> {
+        if (s.getState() == Slice.State.INACTIVE) {
+          log.trace("-- slice state is {}, skip leader election {} / {}", s.getState(), dc.getName(), s.getName());
+          return;
+        }
+        if (s.getState() != Slice.State.ACTIVE) {
+          log.trace("-- slice state is {}, but I will run leader election {} / {}", s.getState(), dc.getName(), s.getName());
+        }
         if (s.getLeader() != null) {
-          log.debug("-- already has leader {} / {}", dc.getName(), s.getName());
+          log.trace("-- already has leader {} / {}", dc.getName(), s.getName());
           return;
         }
         if (s.getReplicas().isEmpty()) {
-          log.debug("-- no replicas in {} / {}", dc.getName(), s.getName());
+          log.trace("-- no replicas in {} / {}", dc.getName(), s.getName());
           return;
         }
-        log.debug("-- submit leader election for {} / {}", dc.getName(), s.getName());
+        log.trace("-- submit leader election for {} / {}", dc.getName(), s.getName());
         cloudManager.submit(() -> {
           simRunLeaderElection(dc.getName(), s, saveClusterState);
           return true;
@@ -656,9 +692,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     AtomicBoolean stateChanged = new AtomicBoolean(Boolean.FALSE);
     Replica leader = s.getLeader();
     if (leader == null || !liveNodes.contains(leader.getNodeName())) {
-      log.debug("Running leader election for {} / {}", collection, s.getName());
+      log.trace("Running leader election for {} / {}", collection, s.getName());
       if (s.getReplicas().isEmpty()) { // no replicas - punt
-        log.debug("-- no replicas in {} / {}", collection, s.getName());
+        log.trace("-- no replicas in {} / {}", collection, s.getName());
         return;
       }
       ActionThrottle lt = getThrottle(collection, s.getName());
@@ -692,7 +728,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
           }
         });
         if (alreadyHasLeader.get()) {
-          log.debug("-- already has leader {} / {}: {}", collection, s.getName(), s);
+          log.trace("-- already has leader {} / {}: {}", collection, s.getName(), s);
           return;
         }
         if (active.isEmpty()) {
@@ -718,11 +754,11 @@ public class SimClusterStateProvider implements ClusterStateProvider {
         synchronized (ri) {
           ri.getVariables().put(ZkStateReader.LEADER_PROP, "true");
         }
+        log.debug("-- elected new leader for {} / {}: {}", collection, s.getName(), ri);
         stateChanged.set(true);
-        log.debug("-- elected new leader for " + collection + " / " + s.getName() + ": " + ri.getName());
       }
     } else {
-      log.debug("-- already has leader for {} / {}", collection, s.getName());
+      log.trace("-- already has leader for {} / {}", collection, s.getName());
     }
     if (stateChanged.get() || saveState) {
       collectionsStatesRef.set(null);
@@ -810,9 +846,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
               collection.getReplicas().size() + 1);
           try {
             replicaProps.put(ZkStateReader.CORE_NAME_PROP, coreName);
-            replicaProps.put("SEARCHER.searcher.deletedDocs", 0);
-            replicaProps.put("SEARCHER.searcher.numDocs", 0);
-            replicaProps.put("SEARCHER.searcher.maxDoc", 0);
+            replicaProps.put("SEARCHER.searcher.deletedDocs", new AtomicLong(0));
+            replicaProps.put("SEARCHER.searcher.numDocs", new AtomicLong(0));
+            replicaProps.put("SEARCHER.searcher.maxDoc", new AtomicLong(0));
             ReplicaInfo ri = new ReplicaInfo("core_node" + Assign.incAndGetId(stateManager, withCollection, 0),
                 coreName, withCollection, withCollectionShard, pos.type, pos.node, replicaProps);
             cloudManager.submit(() -> {
@@ -833,9 +869,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
           replicaNum.getAndIncrement());
       try {
         replicaProps.put(ZkStateReader.CORE_NAME_PROP, coreName);
-        replicaProps.put("SEARCHER.searcher.deletedDocs", 0);
-        replicaProps.put("SEARCHER.searcher.numDocs", 0);
-        replicaProps.put("SEARCHER.searcher.maxDoc", 0);
+        replicaProps.put("SEARCHER.searcher.deletedDocs", new AtomicLong(0));
+        replicaProps.put("SEARCHER.searcher.numDocs", new AtomicLong(0));
+        replicaProps.put("SEARCHER.searcher.maxDoc", new AtomicLong(0));
         ReplicaInfo ri = new ReplicaInfo("core_node" + Assign.incAndGetId(stateManager, collectionName, 0),
             coreName, collectionName, pos.shard, pos.type, pos.node, replicaProps);
         cloudManager.submit(() -> {
@@ -900,6 +936,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       collProperties.remove(collection);
       sliceProperties.remove(collection);
       leaderThrottles.remove(collection);
+      colShardReplicaMap.remove(collection);
 
       opDelay(collection, CollectionParams.CollectionAction.DELETE.name());
 
@@ -942,6 +979,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     lock.lockInterruptibly();
     try {
       nodeReplicaMap.clear();
+      colShardReplicaMap.clear();
       collProperties.clear();
       sliceProperties.clear();
       leaderThrottles.clear();
@@ -1086,12 +1124,24 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     sliceName.set(message.getStr(SHARD_ID_PROP));
     String splitKey = message.getStr("split.key");
 
-    // always invalidate cached collection states to get up-to-date metrics
-    collectionsStatesRef.set(null);
-
     ClusterState clusterState = getClusterState();
     DocCollection collection = clusterState.getCollection(collectionName);
     Slice parentSlice = SplitShardCmd.getParentSlice(clusterState, collectionName, sliceName, splitKey);
+    Replica leader = parentSlice.getLeader();
+    // XXX leader election may not have happened yet - should we require it?
+    if (leader == null) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Shard " + collectionName +
+          " /  " + sliceName.get() + " has no leader and can't be split");
+    }
+    // start counting buffered updates
+    Map<String, Object> props = sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>())
+        .computeIfAbsent(sliceName.get(), ss -> new ConcurrentHashMap<>());
+    if (props.containsKey(BUFFERED_UPDATES)) {
+      log.debug("--- SOLR-12729: Overlapping splitShard commands for {} / {}", collectionName, sliceName.get());
+      return;
+    }
+    props.put(BUFFERED_UPDATES, new AtomicLong());
+
     List<DocRouter.Range> subRanges = new ArrayList<>();
     List<String> subSlices = new ArrayList<>();
     List<String> subShardNames = new ArrayList<>();
@@ -1117,12 +1167,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     if (sessionWrapper != null) sessionWrapper.release();
 
     // adjust numDocs / deletedDocs / maxDoc
-    Replica leader = parentSlice.getLeader();
-    // XXX leader election may not have happened yet - should we require it?
-    if (leader == null) {
-      leader = parentSlice.getReplicas().iterator().next();
-    }
-    String numDocsStr = leader.getStr("SEARCHER.searcher.numDocs", "0");
+    String numDocsStr = String.valueOf(getReplicaInfo(leader).getVariable("SEARCHER.searcher.numDocs", "0"));
     long numDocs = Long.parseLong(numDocsStr);
     long newNumDocs = numDocs / subSlices.size();
     long remainderDocs = numDocs % subSlices.size();
@@ -1130,10 +1175,23 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     long remainderIndexSize = SimCloudManager.DEFAULT_IDX_SIZE_BYTES + remainderDocs * DEFAULT_DOC_SIZE_BYTES;
     String remainderSlice = null;
 
+    // add slice props
+    for (int i = 0; i < subRanges.size(); i++) {
+      String subSlice = subSlices.get(i);
+      DocRouter.Range range = subRanges.get(i);
+      Map<String, Object> sliceProps = sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>())
+          .computeIfAbsent(subSlice, ss -> new ConcurrentHashMap<>());
+      sliceProps.put(Slice.RANGE, range);
+      sliceProps.put(Slice.PARENT, sliceName.get());
+      sliceProps.put(ZkStateReader.STATE_PROP, Slice.State.CONSTRUCTION.toString());
+      sliceProps.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
+    }
+    // add replicas
     for (ReplicaPosition replicaPosition : replicaPositions) {
       String subSliceName = replicaPosition.shard;
       String subShardNodeName = replicaPosition.node;
-      String solrCoreName = collectionName + "_" + subSliceName + "_replica" + (replicaPosition.index);
+//      String solrCoreName = collectionName + "_" + subSliceName + "_replica_n" + (replicaPosition.index);
+      String solrCoreName = Assign.buildSolrCoreName(collectionName, subSliceName, replicaPosition.type, Assign.incAndGetId(stateManager, collectionName, 0));
       Map<String, Object> replicaProps = new HashMap<>();
       replicaProps.put(ZkStateReader.SHARD_ID_PROP, replicaPosition.shard);
       replicaProps.put(ZkStateReader.NODE_NAME_PROP, replicaPosition.node);
@@ -1149,43 +1207,75 @@ public class SimClusterStateProvider implements ClusterStateProvider {
         replicasNumDocs += remainderDocs;
         replicasIndexSize += remainderIndexSize;
       }
-      replicaProps.put("SEARCHER.searcher.numDocs", replicasNumDocs);
-      replicaProps.put("SEARCHER.searcher.maxDoc", replicasNumDocs);
-      replicaProps.put("SEARCHER.searcher.deletedDocs", 0);
-      replicaProps.put(Type.CORE_IDX.metricsAttribute, replicasIndexSize);
-      replicaProps.put(Variable.coreidxsize, Type.CORE_IDX.convertVal(replicasIndexSize));
+      replicaProps.put("SEARCHER.searcher.numDocs", new AtomicLong(replicasNumDocs));
+      replicaProps.put("SEARCHER.searcher.maxDoc", new AtomicLong(replicasNumDocs));
+      replicaProps.put("SEARCHER.searcher.deletedDocs", new AtomicLong(0));
+      replicaProps.put(Type.CORE_IDX.metricsAttribute, new AtomicLong(replicasIndexSize));
+      replicaProps.put(Variable.coreidxsize, new AtomicDouble((Double)Type.CORE_IDX.convertVal(replicasIndexSize)));
 
       ReplicaInfo ri = new ReplicaInfo("core_node" + Assign.incAndGetId(stateManager, collectionName, 0),
           solrCoreName, collectionName, replicaPosition.shard, replicaPosition.type, subShardNodeName, replicaProps);
       simAddReplica(replicaPosition.node, ri, false);
     }
-    // mark the old slice as inactive
+    simRunLeaderElection(Collections.singleton(collectionName), true);
+
+    // delay it once again to better simulate replica recoveries
+    //opDelay(collectionName, CollectionParams.CollectionAction.SPLITSHARD.name());
+
+    CloudTestUtils.waitForState(cloudManager, collectionName, 30, TimeUnit.SECONDS, (liveNodes, state) -> {
+      for (String subSlice : subSlices) {
+        Slice s = state.getSlice(subSlice);
+        if (s.getLeader() == null) {
+          log.debug("** no leader in {} / {}", collectionName, s);
+          return false;
+        }
+        if (s.getReplicas().size() < repFactor) {
+          log.debug("** expected {} repFactor but there are {} replicas", repFactor, s.getReplicas().size());
+          return false;
+        }
+      }
+      return true;
+    });
+    // mark the new slices as active and the old slice as inactive
+    log.trace("-- switching slice states after split shard: collection={}, parent={}, subSlices={}", collectionName,
+        sliceName.get(), subSlices);
     lock.lockInterruptibly();
     try {
-      Map<String, Object> props = sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>())
+      Map<String, Object> sProps = sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>())
           .computeIfAbsent(sliceName.get(), s -> new ConcurrentHashMap<>());
-      props.put(ZkStateReader.STATE_PROP, Slice.State.INACTIVE.toString());
-      props.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
+      sProps.put(ZkStateReader.STATE_PROP, Slice.State.INACTIVE.toString());
+      sProps.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
+      AtomicLong bufferedUpdates = (AtomicLong)sProps.remove(BUFFERED_UPDATES);
+      if (bufferedUpdates.get() > 0) {
+        // apply buffered updates
+        long perShard = bufferedUpdates.get() / subSlices.size();
+        long remainder = bufferedUpdates.get() % subSlices.size();
+        log.debug("-- applying {} buffered docs from {} / {}, perShard={}, remainder={}", bufferedUpdates.get(),
+            collectionName, parentSlice.getName(), perShard, remainder);
+        for (int i = 0; i < subSlices.size(); i++) {
+          String sub = subSlices.get(i);
+          long numUpdates = perShard;
+          if (i == 0) {
+            numUpdates += remainder;
+          }
+          simSetShardValue(collectionName, sub, "SEARCHER.searcher.numDocs", numUpdates, true, false);
+          simSetShardValue(collectionName, sub, "SEARCHER.searcher.maxDoc", numUpdates, true, false);
+        }
+      }
       // XXX also mark replicas as down? currently SplitShardCmd doesn't do this
 
+      for (String s : subSlices) {
+        Map<String, Object> sliceProps = sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>())
+            .computeIfAbsent(s, ss -> new ConcurrentHashMap<>());
+        sliceProps.put(ZkStateReader.STATE_PROP, Slice.State.ACTIVE.toString());
+        sliceProps.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
+      }
+
       // invalidate cached state
       collectionsStatesRef.set(null);
     } finally {
       lock.unlock();
     }
-    // add slice props
-    for (int i = 0; i < subRanges.size(); i++) {
-      String subSlice = subSlices.get(i);
-      DocRouter.Range range = subRanges.get(i);
-      Map<String, Object> sliceProps = sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>())
-          .computeIfAbsent(subSlice, ss -> new ConcurrentHashMap<>());
-      sliceProps.put(Slice.RANGE, range);
-      sliceProps.put(Slice.PARENT, sliceName.get());
-      sliceProps.put(ZkStateReader.STATE_PROP, Slice.State.ACTIVE.toString());
-      sliceProps.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
-    }
-    collectionsStatesRef.set(null);
-    simRunLeaderElection(Collections.singleton(collectionName), true);
     results.add("success", "");
 
   }
@@ -1216,7 +1306,8 @@ public class SimClusterStateProvider implements ClusterStateProvider {
 
     lock.lockInterruptibly();
     try {
-      sliceProperties.computeIfAbsent(collectionName, coll -> new ConcurrentHashMap<>()).remove(sliceName);
+      sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>()).remove(sliceName);
+      colShardReplicaMap.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>()).remove(sliceName);
       nodeReplicaMap.forEach((n, replicas) -> {
         Iterator<ReplicaInfo> it = replicas.iterator();
         while (it.hasNext()) {
@@ -1237,7 +1328,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
 
   public void createSystemCollection() throws IOException {
     try {
-      if (simListCollections().contains(CollectionAdminParams.SYSTEM_COLL)) {
+      if (colShardReplicaMap.containsKey(CollectionAdminParams.SYSTEM_COLL)) {
         return;
       }
       ZkNodeProps props = new ZkNodeProps(
@@ -1278,7 +1369,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     if (collection == null) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection not set");
     }
-    if (!simListCollections().contains(collection)) {
+    if (!colShardReplicaMap.containsKey(collection)) {
       if (CollectionAdminParams.SYSTEM_COLL.equals(collection)) {
         // auto-create
         createSystemCollection();
@@ -1286,126 +1377,257 @@ public class SimClusterStateProvider implements ClusterStateProvider {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection '" + collection + "' doesn't exist");
       }
     }
-    // always reset first to get the current metrics - it's easier than to keep matching
-    // Replica with ReplicaInfo where the current real counts are stored
-    collectionsStatesRef.set(null);
+
     DocCollection coll = getClusterState().getCollection(collection);
     DocRouter router = coll.getRouter();
-
-    boolean modified = false;
-
-    lock.lockInterruptibly();
-    try {
-      List<String> deletes = req.getDeleteById();
-      if (deletes != null && !deletes.isEmpty()) {
-        for (String id : deletes) {
-          Slice s = router.getTargetSlice(id, null, null, req.getParams(), coll);
-          // NOTE: we don't use getProperty because it uses PROPERTY_PROP_PREFIX
+    List<String> deletes = req.getDeleteById();
+    if (deletes != null && !deletes.isEmpty()) {
+      for (String id : deletes) {
+        Slice s = router.getTargetSlice(id, null, null, req.getParams(), coll);
+        Replica leader = s.getLeader();
+        if (leader == null) {
+          log.debug("-- no leader in " + s);
+          continue;
+        }
+        cloudManager.getMetricManager().registry(createRegistryName(collection, s.getName(), leader)).counter("UPDATE./update.requests").inc();
+        ReplicaInfo ri = getReplicaInfo(leader);
+        Number numDocs = (Number)ri.getVariable("SEARCHER.searcher.numDocs");
+        if (numDocs == null || numDocs.intValue() <= 0) {
+          log.debug("-- attempting to delete nonexistent doc " + id + " from " + s.getLeader());
+          continue;
+        }
+        AtomicLong bufferedUpdates = (AtomicLong)sliceProperties.get(collection).get(s.getName()).get(BUFFERED_UPDATES);
+        if (bufferedUpdates != null) {
+          if (bufferedUpdates.get() > 0) {
+            bufferedUpdates.decrementAndGet();
+          } else {
+            log.debug("-- attempting to delete nonexistent buffered doc " + id + " from " + s.getLeader());
+          }
+          continue;
+        }
+        lock.lockInterruptibly();
+        try {
+          simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", 1, true, false);
+          simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", -1, true, false);
+          Number indexSize = (Number)ri.getVariable(Type.CORE_IDX.metricsAttribute);
+          if (indexSize != null && indexSize.longValue() > SimCloudManager.DEFAULT_IDX_SIZE_BYTES) {
+            indexSize = indexSize.longValue() - DEFAULT_DOC_SIZE_BYTES;
+            simSetShardValue(collection, s.getName(), Type.CORE_IDX.metricsAttribute,
+                new AtomicLong(indexSize.longValue()), false, false);
+            simSetShardValue(collection, s.getName(), Variable.coreidxsize,
+                new AtomicDouble((Double)Type.CORE_IDX.convertVal(indexSize)), false, false);
+          } else {
+            throw new Exception("unexpected indexSize ri=" + ri);
+          }
+        } catch (Exception e) {
+          throw new IOException(e);
+        } finally {
+          lock.unlock();
+        }
+      }
+    }
+    deletes = req.getDeleteQuery();
+    if (deletes != null && !deletes.isEmpty()) {
+      for (String q : deletes) {
+        if (!"*:*".equals(q)) {
+          throw new UnsupportedOperationException("Only '*:*' query is supported in deleteByQuery");
+        }
+        for (Slice s : coll.getSlices()) {
           Replica leader = s.getLeader();
           if (leader == null) {
             log.debug("-- no leader in " + s);
             continue;
           }
+
           cloudManager.getMetricManager().registry(createRegistryName(collection, s.getName(), leader)).counter("UPDATE./update.requests").inc();
           ReplicaInfo ri = getReplicaInfo(leader);
           Number numDocs = (Number)ri.getVariable("SEARCHER.searcher.numDocs");
-          if (numDocs == null || numDocs.intValue() <= 0) {
-            log.debug("-- attempting to delete nonexistent doc " + id + " from " + s.getLeader());
+          if (numDocs == null || numDocs.intValue() == 0) {
             continue;
           }
-          modified = true;
+          lock.lockInterruptibly();
           try {
-            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", 1, true, false);
-            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", -1, true, false);
-            Number indexSize = (Number)ri.getVariable(Type.CORE_IDX.metricsAttribute);
-            if (indexSize != null && indexSize.longValue() > SimCloudManager.DEFAULT_IDX_SIZE_BYTES) {
-              indexSize = indexSize.longValue() - DEFAULT_DOC_SIZE_BYTES;
-              simSetShardValue(collection, s.getName(), Type.CORE_IDX.metricsAttribute,
-                  indexSize.intValue(), false, false);
-              simSetShardValue(collection, s.getName(), Variable.coreidxsize,
-                  Type.CORE_IDX.convertVal(indexSize), false, false);
-            } else {
-              throw new Exception("unexpected indexSize ri=" + ri);
-            }
+            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", new AtomicLong(numDocs.longValue()), false, false);
+            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", new AtomicLong(0), false, false);
+            simSetShardValue(collection, s.getName(), Type.CORE_IDX.metricsAttribute,
+                new AtomicLong(SimCloudManager.DEFAULT_IDX_SIZE_BYTES), false, false);
+            simSetShardValue(collection, s.getName(), Variable.coreidxsize,
+                new AtomicDouble((Double)Type.CORE_IDX.convertVal(SimCloudManager.DEFAULT_IDX_SIZE_BYTES)), false, false);
           } catch (Exception e) {
             throw new IOException(e);
+          } finally {
+            lock.unlock();
           }
         }
       }
-      deletes = req.getDeleteQuery();
-      if (deletes != null && !deletes.isEmpty()) {
-        for (String q : deletes) {
-          if (!"*:*".equals(q)) {
-            throw new UnsupportedOperationException("Only '*:*' query is supported in deleteByQuery");
+    }
+    List<SolrInputDocument> docs = req.getDocuments();
+    int docCount = 0;
+    Iterator<SolrInputDocument> it = null;
+    if (docs != null) {
+      docCount = docs.size();
+    } else {
+      it = req.getDocIterator();
+      if (it != null) {
+        while (it.hasNext()) {
+          it.next();
+          docCount++;
+        }
+      }
+    }
+    if (docCount > 0) {
+      // this approach to updating counters and metrics drastically increases performance
+      // of bulk updates, because simSetShardValue is relatively costly
+
+      Map<String, AtomicLong> docUpdates = new HashMap<>();
+      Map<String, Map<String, AtomicLong>> metricUpdates = new HashMap<>();
+
+      // XXX don't add more than 2bln docs in one request
+      boolean modified = false;
+      lock.lockInterruptibly();
+      try {
+        coll = getClusterState().getCollection(collection);
+        Slice[] slices = coll.getActiveSlicesArr();
+        if (slices.length == 0) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection without slices");
+        }
+        int[] perSlice = new int[slices.length];
+
+        if (it != null) {
+          // BULK UPDATE: simulate random doc assignment without actually calling DocRouter,
+          // which adds significant overhead
+
+          int totalAdded = 0;
+          for (int i = 0; i < slices.length; i++) {
+            Slice s = slices[i];
+            long count = (long) docCount * ((long) s.getRange().max - (long) s.getRange().min) / 0x100000000L;
+            perSlice[i] = (int) count;
+            totalAdded += perSlice[i];
+          }
+          // loss of precision due to integer math
+          int diff = docCount - totalAdded;
+          if (diff > 0) {
+            // spread the remainder more or less equally
+            int perRemain = diff / slices.length;
+            int remainder = diff % slices.length;
+            int remainderSlice = slices.length > 1 ? bulkUpdateRandom.nextInt(slices.length) : 0;
+            for (int i = 0; i < slices.length; i++) {
+              perSlice[i] += perRemain;
+              if (i == remainderSlice) {
+                perSlice[i] += remainder;
+              }
+            }
           }
-          for (Slice s : coll.getSlices()) {
+          for (int i = 0; i < slices.length; i++) {
+            Slice s = slices[i];
             Replica leader = s.getLeader();
             if (leader == null) {
               log.debug("-- no leader in " + s);
               continue;
             }
-
-            cloudManager.getMetricManager().registry(createRegistryName(collection, s.getName(), leader)).counter("UPDATE./update.requests").inc();
-            ReplicaInfo ri = getReplicaInfo(leader);
-            Number numDocs = (Number)ri.getVariable("SEARCHER.searcher.numDocs");
-            if (numDocs == null || numDocs.intValue() == 0) {
+            metricUpdates.computeIfAbsent(s.getName(), sh -> new HashMap<>())
+                .computeIfAbsent(leader.getCoreName(), cn -> new AtomicLong())
+                .addAndGet(perSlice[i]);
+            modified = true;
+            AtomicLong bufferedUpdates = (AtomicLong)sliceProperties.get(collection).get(s.getName()).get(BUFFERED_UPDATES);
+            if (bufferedUpdates != null) {
+              bufferedUpdates.addAndGet(perSlice[i]);
+              continue;
+            }
+            docUpdates.computeIfAbsent(s.getName(), sh -> new AtomicLong())
+                .addAndGet(perSlice[i]);
+          }
+        } else {
+          // SMALL UPDATE: use exact assignment via DocRouter
+          for (SolrInputDocument doc : docs) {
+            String id = (String) doc.getFieldValue("id");
+            if (id == null) {
+              throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Document without id: " + doc);
+            }
+            Slice s = coll.getRouter().getTargetSlice(id, doc, null, null, coll);
+            Replica leader = s.getLeader();
+            if (leader == null) {
+              log.debug("-- no leader in " + s);
               continue;
             }
+            metricUpdates.computeIfAbsent(s.getName(), sh -> new HashMap<>())
+                .computeIfAbsent(leader.getCoreName(), cn -> new AtomicLong())
+                .incrementAndGet();
             modified = true;
-            try {
-              simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", numDocs, false, false);
-              simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", 0, false, false);
-              simSetShardValue(collection, s.getName(), Type.CORE_IDX.metricsAttribute,
-                  SimCloudManager.DEFAULT_IDX_SIZE_BYTES, false, false);
-              simSetShardValue(collection, s.getName(), Variable.coreidxsize,
-                  Type.CORE_IDX.convertVal(SimCloudManager.DEFAULT_IDX_SIZE_BYTES), false, false);
-            } catch (Exception e) {
-              throw new IOException(e);
+            AtomicLong bufferedUpdates = (AtomicLong)sliceProperties.get(collection).get(s.getName()).get(BUFFERED_UPDATES);
+            if (bufferedUpdates != null) {
+              bufferedUpdates.incrementAndGet();
+              continue;
             }
+            docUpdates.computeIfAbsent(s.getName(), sh -> new AtomicLong())
+                .incrementAndGet();
           }
         }
-      }
-      List<SolrInputDocument> docs = req.getDocuments();
-      if (docs != null && !docs.isEmpty()) {
-        for (SolrInputDocument doc : docs) {
-          String id = (String) doc.getFieldValue("id");
-          if (id == null) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Document without id: " + doc);
-          }
-          Slice s = router.getTargetSlice(id, null, null, req.getParams(), coll);
-          Replica leader = s.getLeader();
-          if (leader == null) {
-            log.debug("-- no leader in " + s);
-            continue;
-          }
-          cloudManager.getMetricManager().registry(createRegistryName(collection, s.getName(), leader)).counter("UPDATE./update.requests").inc();
-          modified = true;
-          try {
-            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", 1, true, false);
-            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.maxDoc", 1, true, false);
 
-            ReplicaInfo ri = getReplicaInfo(leader);
-            Number indexSize = (Number)ri.getVariable(Type.CORE_IDX.metricsAttribute);
-            // for each new document increase the size by DEFAULT_DOC_SIZE_BYTES
-            indexSize = indexSize.longValue() + DEFAULT_DOC_SIZE_BYTES;
-            simSetShardValue(collection, s.getName(), Type.CORE_IDX.metricsAttribute,
-                indexSize.longValue(), false, false);
-            simSetShardValue(collection, s.getName(), Variable.coreidxsize,
-                Type.CORE_IDX.convertVal(indexSize), false, false);
-          } catch (Exception e) {
-            throw new IOException(e);
-          }
+        if (modified) {
+          docUpdates.forEach((sh, count) -> {
+            try {
+              simSetShardValue(collection, sh, "SEARCHER.searcher.numDocs", count.get(), true, false);
+              simSetShardValue(collection, sh, "SEARCHER.searcher.maxDoc", count.get(), true, false);
+              // for each new document increase the size by DEFAULT_DOC_SIZE_BYTES
+              simSetShardValue(collection, sh, Type.CORE_IDX.metricsAttribute,
+                  DEFAULT_DOC_SIZE_BYTES * count.get(), true, false);
+              simSetShardValue(collection, sh, Variable.coreidxsize,
+                  Type.CORE_IDX.convertVal(DEFAULT_DOC_SIZE_BYTES * count.get()), true, false);
+            } catch (Exception e) {
+              throw new RuntimeException(e);
+            }
+          });
+          metricUpdates.forEach((sh, cores) -> {
+            cores.forEach((core, count) -> {
+              String registry = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, collection, sh,
+                  Utils.parseMetricsReplicaName(collection, core));
+              cloudManager.getMetricManager().registry(registry).counter("UPDATE./update.requests").inc(count.get());
+            });
+          });
         }
+      } finally {
+        lock.unlock();
       }
-      if (modified) {
-        collectionsStatesRef.set(null);
-      }
-    } finally {
-      lock.unlock();
     }
     return new UpdateResponse();
   }
 
+  public QueryResponse simQuery(QueryRequest req) throws SolrException, InterruptedException, IOException {
+    ensureNotClosed();
+    String collection = req.getCollection();
+    if (collection == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection not set");
+    }
+    if (!colShardReplicaMap.containsKey(collection)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection does not exist");
+    }
+    String query = req.getParams().get(CommonParams.Q);
+    if (query == null || !query.equals("*:*")) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Only '*:*' query is supported");
+    }
+    ClusterState clusterState = getClusterState();
+    DocCollection coll = clusterState.getCollection(collection);
+    AtomicLong count = new AtomicLong();
+    for (Slice s : coll.getActiveSlicesArr()) {
+      Replica r = s.getLeader();
+      if (r == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, collection + "/" + s.getName() + " has no leader");
+      }
+      ReplicaInfo ri = getReplicaInfo(r);
+      Number numDocs = (Number)ri.getVariable("SEARCHER.searcher.numDocs", 0L);
+      count.addAndGet(numDocs.longValue());
+    }
+    QueryResponse rsp = new QueryResponse();
+    NamedList<Object> values = new NamedList<>();
+    values.add("responseHeader", new NamedList<>());
+    SolrDocumentList docs = new SolrDocumentList();
+    docs.setNumFound(count.get());
+    values.add("response", docs);
+    rsp.setResponse(values);
+    return rsp;
+  }
+
   private static String createRegistryName(String collection, String shard, Replica r) {
     return SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, collection, shard,
         Utils.parseMetricsReplicaName(collection, r.getCoreName()));
@@ -1572,17 +1794,15 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    *               divided by the number of replicas.
    */
   public void simSetShardValue(String collection, String shard, String key, Object value, boolean delta, boolean divide) throws Exception {
-    List<ReplicaInfo> infos = new ArrayList<>();
-    nodeReplicaMap.forEach((n, replicas) -> {
-      replicas.forEach(r -> {
-        if (r.getCollection().equals(collection)) {
-          if (shard != null && !shard.equals(r.getShard())) {
-            return;
-          }
-          infos.add(r);
-        }
-      });
-    });
+    final List<ReplicaInfo> infos;
+    if (shard == null) {
+      infos = new ArrayList<>();
+      colShardReplicaMap.computeIfAbsent(collection, c -> new ConcurrentHashMap<>())
+        .forEach((sh, replicas) -> infos.addAll(replicas));
+    } else {
+      infos = colShardReplicaMap.computeIfAbsent(collection, c -> new ConcurrentHashMap<>())
+          .computeIfAbsent(shard, s -> new ArrayList<>());
+    }
     if (infos.isEmpty()) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection " + collection + " doesn't exist (shard=" + shard + ").");
     }
@@ -1602,22 +1822,50 @@ public class SimClusterStateProvider implements ClusterStateProvider {
             Object prevValue = r.getVariables().get(key);
             if (prevValue != null) {
               if ((prevValue instanceof Number) && (value instanceof Number)) {
-                if (((prevValue instanceof Long) || (prevValue instanceof Integer)) &&
+                if (((prevValue instanceof Long) || (prevValue instanceof Integer) ||
+                    (prevValue instanceof AtomicLong) || (prevValue instanceof AtomicInteger)) &&
                     ((value instanceof Long) || (value instanceof Integer))) {
-                  Long newValue = ((Number)prevValue).longValue() + ((Number)value).longValue();
-                  r.getVariables().put(key, newValue);
+                  long newValue = ((Number)prevValue).longValue() + ((Number)value).longValue();
+                  // minimize object allocations
+                  if (prevValue instanceof AtomicLong) {
+                    ((AtomicLong)prevValue).set(newValue);
+                  } else if (prevValue instanceof AtomicInteger) {
+                    ((AtomicInteger)prevValue).set(((Number)prevValue).intValue() + ((Number)value).intValue());
+                  } else {
+                    r.getVariables().put(key, newValue);
+                  }
                 } else {
-                  Double newValue = ((Number)prevValue).doubleValue() + ((Number)value).doubleValue();
-                  r.getVariables().put(key, newValue);
+                  double newValue = ((Number)prevValue).doubleValue() + ((Number)value).doubleValue();
+                  if (prevValue instanceof AtomicDouble) {
+                    ((AtomicDouble)prevValue).set(newValue);
+                  } else {
+                    r.getVariables().put(key, newValue);
+                  }
                 }
               } else {
                 throw new UnsupportedOperationException("delta cannot be applied to non-numeric values: " + prevValue + " and " + value);
               }
             } else {
-              r.getVariables().put(key, value);
+              if (value instanceof Integer) {
+                r.getVariables().put(key, new AtomicInteger((Integer)value));
+              } else if (value instanceof Long) {
+                r.getVariables().put(key, new AtomicLong((Long)value));
+              } else if (value instanceof Double) {
+                r.getVariables().put(key, new AtomicDouble((Double)value));
+              } else {
+                r.getVariables().put(key, value);
+              }
             }
           } else {
-            r.getVariables().put(key, value);
+            if (value instanceof Integer) {
+              r.getVariables().put(key, new AtomicInteger((Integer)value));
+            } else if (value instanceof Long) {
+              r.getVariables().put(key, new AtomicLong((Long)value));
+            } else if (value instanceof Double) {
+              r.getVariables().put(key, new AtomicDouble((Double)value));
+            } else {
+              r.getVariables().put(key, value);
+            }
           }
         }
       }
@@ -1639,21 +1887,128 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     }
   }
 
+  public List<ReplicaInfo> simGetReplicaInfos(String collection, String shard) {
+    List<ReplicaInfo> replicas = colShardReplicaMap.computeIfAbsent(collection, c -> new ConcurrentHashMap<>())
+        .computeIfAbsent(shard, s -> new ArrayList<>());
+    if (replicas == null) {
+      return Collections.emptyList();
+    } else {
+      // make a defensive copy to avoid ConcurrentModificationException
+      return Arrays.asList(replicas.toArray(new ReplicaInfo[replicas.size()]));
+    }
+  }
+
   /**
    * List collections.
    * @return list of existing collections.
    */
   public List<String> simListCollections() throws InterruptedException {
-    final Set<String> collections = new HashSet<>();
+    return new ArrayList<>(colShardReplicaMap.keySet());
+  }
+
+  public Map<String, Map<String, Object>> simGetCollectionStats() throws IOException, InterruptedException {
+    Map<String, Map<String, Object>> stats = new TreeMap<>();
     lock.lockInterruptibly();
     try {
-      nodeReplicaMap.forEach((n, replicas) -> {
-        replicas.forEach(ri -> collections.add(ri.getCollection()));
+      collectionsStatesRef.set(null);
+      ClusterState state = getClusterState();
+      state.forEachCollection(coll -> {
+        Map<String, Object> perColl = new LinkedHashMap<>();
+        stats.put(coll.getName(), perColl);
+        perColl.put("shardsTotal", coll.getSlices().size());
+        Map<String, AtomicInteger> shardState = new TreeMap<>();
+        int noLeader = 0;
+
+        SummaryStatistics docs = new SummaryStatistics();
+        SummaryStatistics bytes = new SummaryStatistics();
+        SummaryStatistics inactiveDocs = new SummaryStatistics();
+        SummaryStatistics inactiveBytes = new SummaryStatistics();
+
+        long deletedDocs = 0;
+        long bufferedDocs = 0;
+        int totalReplicas = 0;
+        int activeReplicas = 0;
+
+        for (Slice s : coll.getSlices()) {
+          shardState.computeIfAbsent(s.getState().toString(), st -> new AtomicInteger())
+              .incrementAndGet();
+          totalReplicas += s.getReplicas().size();
+          if (s.getState() != Slice.State.ACTIVE) {
+            if (!s.getReplicas().isEmpty()) {
+              ReplicaInfo ri = getReplicaInfo(s.getReplicas().iterator().next());
+              if (ri != null) {
+                Number numDocs = (Number)ri.getVariable("SEARCHER.searcher.numDocs");
+                Number numBytes = (Number)ri.getVariable("INDEX.sizeInBytes");
+                if (numDocs != null) {
+                  inactiveDocs.addValue(numDocs.doubleValue());
+                }
+                if (numBytes != null) {
+                  inactiveBytes.addValue(numBytes.doubleValue());
+                }
+              }
+            }
+            continue;
+          }
+          AtomicLong buffered = (AtomicLong)sliceProperties.get(coll.getName()).get(s.getName()).get(BUFFERED_UPDATES);
+          if (buffered != null) {
+            bufferedDocs += buffered.get();
+          }
+          activeReplicas += s.getReplicas().size();
+          Replica leader = s.getLeader();
+          if (leader == null) {
+            noLeader++;
+            if (!s.getReplicas().isEmpty()) {
+              leader = s.getReplicas().iterator().next();
+            }
+          }
+          ReplicaInfo ri = null;
+          if (leader != null) {
+            ri = getReplicaInfo(leader);
+            if (ri == null) {
+              log.warn("Unknown ReplicaInfo for {}", leader);
+            }
+          }
+          if (ri != null) {
+            Number numDocs = (Number)ri.getVariable("SEARCHER.searcher.numDocs");
+            Number delDocs = (Number)ri.getVariable("SEARCHER.searcher.deletedDocs");
+            Number numBytes = (Number)ri.getVariable("INDEX.sizeInBytes");
+            if (numDocs != null) {
+              docs.addValue(numDocs.doubleValue());
+            }
+            if (delDocs != null) {
+              deletedDocs += delDocs.longValue();
+            }
+            if (numBytes != null) {
+              bytes.addValue(numBytes.doubleValue());
+            }
+          }
+        }
+        perColl.put("shardsState", shardState);
+        perColl.put("  shardsWithoutLeader", noLeader);
+        perColl.put("totalReplicas", totalReplicas);
+        perColl.put("  activeReplicas", activeReplicas);
+        perColl.put("  inactiveReplicas", totalReplicas - activeReplicas);
+        long totalDocs = (long)docs.getSum() + bufferedDocs;
+        perColl.put("totalActiveDocs", String.format(Locale.ROOT, "%,d", totalDocs));
+        perColl.put("  bufferedDocs", String.format(Locale.ROOT, "%,d", bufferedDocs));
+        perColl.put("  maxActiveSliceDocs", String.format(Locale.ROOT, "%,d", (long)docs.getMax()));
+        perColl.put("  minActiveSliceDocs", String.format(Locale.ROOT, "%,d", (long)docs.getMin()));
+        perColl.put("  avgActiveSliceDocs", String.format(Locale.ROOT, "%,.0f", docs.getMean()));
+        perColl.put("totalInactiveDocs", String.format(Locale.ROOT, "%,d", (long)inactiveDocs.getSum()));
+        perColl.put("  maxInactiveSliceDocs", String.format(Locale.ROOT, "%,d", (long)inactiveDocs.getMax()));
+        perColl.put("  minInactiveSliceDocs", String.format(Locale.ROOT, "%,d", (long)inactiveDocs.getMin()));
+        perColl.put("  avgInactiveSliceDocs", String.format(Locale.ROOT, "%,.0f", inactiveDocs.getMean()));
+        perColl.put("totalActiveBytes", String.format(Locale.ROOT, "%,d", (long)bytes.getSum()));
+        perColl.put("  maxActiveSliceBytes", String.format(Locale.ROOT, "%,d", (long)bytes.getMax()));
+        perColl.put("  minActiveSliceBytes", String.format(Locale.ROOT, "%,d", (long)bytes.getMin()));
+        perColl.put("  avgActiveSliceBytes", String.format(Locale.ROOT, "%,.0f", bytes.getMean()));
+        perColl.put("totalInactiveBytes", String.format(Locale.ROOT, "%,d", (long)inactiveBytes.getSum()));
+        perColl.put("  maxInactiveSliceBytes", String.format(Locale.ROOT, "%,d", (long)inactiveBytes.getMax()));
+        perColl.put("  minInactiveSliceBytes", String.format(Locale.ROOT, "%,d", (long)inactiveBytes.getMin()));
+        perColl.put("  avgInactiveSliceBytes", String.format(Locale.ROOT, "%,.0f", inactiveBytes.getMean()));
+        perColl.put("totalActiveDeletedDocs", String.format(Locale.ROOT, "%,d", deletedDocs));
       });
-      // check collProps and sliceProps too
-      collProperties.forEach((coll, props) -> collections.add(coll));
-      sliceProperties.forEach((coll, slices) -> collections.add(coll));
-      return new ArrayList<>(collections);
+      return stats;
     } finally {
       lock.unlock();
     }
@@ -1700,6 +2055,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     lock.lock();
     collectionsStatesRef.set(null);
     saveClusterState.set(true);
+    log.debug("** creating new collection states");
     try {
       Map<String, Map<String, Map<String, Replica>>> collMap = new HashMap<>();
       nodeReplicaMap.forEach((n, replicas) -> {
@@ -1741,7 +2097,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
         Map<String, Object> collProps = collProperties.computeIfAbsent(coll, c -> new ConcurrentHashMap<>());
         Map<String, Object> routerProp = (Map<String, Object>) collProps.getOrDefault(DocCollection.DOC_ROUTER, Collections.singletonMap("name", DocRouter.DEFAULT_NAME));
         DocRouter router = DocRouter.getDocRouter((String)routerProp.getOrDefault("name", DocRouter.DEFAULT_NAME));
-        DocCollection dc = new DocCollection(coll, slices, collProps, router, clusterStateVersion, ZkStateReader.CLUSTER_STATE);
+        DocCollection dc = new DocCollection(coll, slices, collProps, router, clusterStateVersion + 1, ZkStateReader.CLUSTER_STATE);
         res.put(coll, dc);
       });
       collectionsStatesRef.set(res);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimDistribStateManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimDistribStateManager.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimDistribStateManager.java
index 1e99ff2..2b8940a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimDistribStateManager.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimDistribStateManager.java
@@ -214,6 +214,8 @@ public class SimDistribStateManager implements DistribStateManager {
   private final String id;
   private final Node root;
 
+  private int juteMaxbuffer = 0xfffff;
+
   public SimDistribStateManager() {
     this(null);
   }
@@ -226,6 +228,8 @@ public class SimDistribStateManager implements DistribStateManager {
     this.id = IdUtils.timeRandomId();
     this.root = root != null ? root : createNewRootNode();
     watchersPool = ExecutorUtil.newMDCAwareFixedThreadPool(10, new DefaultSolrThreadFactory("sim-watchers"));
+    String bufferSize = System.getProperty("jute.maxbuffer", Integer.toString(0xffffff));
+    juteMaxbuffer = Integer.parseInt(bufferSize);
   }
 
   public SimDistribStateManager(ActionThrottle actionThrottle, ActionError actionError) {
@@ -493,6 +497,9 @@ public class SimDistribStateManager implements DistribStateManager {
 
   @Override
   public void setData(String path, byte[] data, int version) throws NoSuchElementException, BadVersionException, IOException {
+    if (data.length > juteMaxbuffer) {
+      throw new IOException("Len error " + data.length);
+    }
     multiLock.lock();
     Node n = null;
     try {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimNodeStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimNodeStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimNodeStateProvider.java
index 9673fa7..5f9293b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimNodeStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimNodeStateProvider.java
@@ -233,6 +233,7 @@ public class SimNodeStateProvider implements NodeStateProvider {
   }
 
   private static final Pattern REGISTRY_PATTERN = Pattern.compile("^solr\\.core\\.([\\w.-_]+?)\\.(shard[\\d_]+?)\\.(replica.*)");
+  private static final Pattern METRIC_KEY_PATTERN = Pattern.compile("^metrics:([^:]+?):([^:]+?)(:([^:]+))?$");
   /**
    * Simulate getting replica metrics values. This uses per-replica properties set in
    * {@link SimClusterStateProvider#simSetCollectionValue(String, String, Object, boolean, boolean)} and
@@ -245,33 +246,31 @@ public class SimNodeStateProvider implements NodeStateProvider {
     if (!liveNodesSet.contains(node)) {
       throw new RuntimeException("non-live node " + node);
     }
-    List<ReplicaInfo> replicas = clusterStateProvider.simGetReplicaInfos(node);
-    if (replicas == null || replicas.isEmpty()) {
-      return Collections.emptyMap();
-    }
     Map<String, Object> values = new HashMap<>();
     for (String tag : tags) {
-      String[] parts = tag.split(":");
-      if (parts.length < 3 || !parts[0].equals("metrics")) {
+      Matcher m = METRIC_KEY_PATTERN.matcher(tag);
+      if (!m.matches() || m.groupCount() < 2) {
         log.warn("Invalid metrics: tag: " + tag);
         continue;
       }
-      if (!parts[1].startsWith("solr.core.")) {
+      String registryName = m.group(1);
+      String key = m.group(3) != null ? m.group(2) + m.group(3) : m.group(2);
+      if (!registryName.startsWith("solr.core.")) {
         // skip - this is probably solr.node or solr.jvm metric
         continue;
       }
-      Matcher m = REGISTRY_PATTERN.matcher(parts[1]);
+      m = REGISTRY_PATTERN.matcher(registryName);
 
       if (!m.matches()) {
-        log.warn("Invalid registry name: " + parts[1]);
+        log.warn("Invalid registry name: " + registryName);
         continue;
       }
       String collection = m.group(1);
       String shard = m.group(2);
       String replica = m.group(3);
-      String key = parts.length > 3 ? parts[2] + ":" + parts[3] : parts[2];
+      List<ReplicaInfo> replicas = clusterStateProvider.simGetReplicaInfos(collection, shard);
       replicas.forEach(r -> {
-        if (r.getCollection().equals(collection) && r.getShard().equals(shard) && r.getCore().endsWith(replica)) {
+        if (r.getNode().equals(node) && r.getCore().endsWith(replica)) {
           Object value = r.getVariables().get(key);
           if (value != null) {
             values.put(tag, value);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExecutePlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExecutePlanAction.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExecutePlanAction.java
index e593695..ea753bc 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExecutePlanAction.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExecutePlanAction.java
@@ -92,7 +92,7 @@ public class TestSimExecutePlanAction extends SimSolrCloudTestCase {
     log.info("Collection ready after " + CloudTestUtils.waitForState(cluster, collectionName, 120, TimeUnit.SECONDS,
         CloudTestUtils.clusterShape(1, 2, false, true)) + "ms");
 
-    String sourceNodeName = cluster.getSimClusterStateProvider().simGetRandomNode(random());
+    String sourceNodeName = cluster.getSimClusterStateProvider().simGetRandomNode();
     ClusterState clusterState = cluster.getClusterStateProvider().getClusterState();
     DocCollection docCollection = clusterState.getCollection(collectionName);
     List<Replica> replicas = docCollection.getReplicas(sourceNodeName);
@@ -181,7 +181,7 @@ public class TestSimExecutePlanAction extends SimSolrCloudTestCase {
     CloudTestUtils.waitForState(cluster, "Timed out waiting for replicas of new collection to be active",
         collectionName, CloudTestUtils.clusterShape(1, 2, false, true));
 
-    String sourceNodeName = cluster.getSimClusterStateProvider().simGetRandomNode(random());
+    String sourceNodeName = cluster.getSimClusterStateProvider().simGetRandomNode();
     ClusterState clusterState = cluster.getClusterStateProvider().getClusterState();
     DocCollection docCollection = clusterState.getCollection(collectionName);
     List<Replica> replicas = docCollection.getReplicas(sourceNodeName);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExtremeIndexing.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExtremeIndexing.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExtremeIndexing.java
new file mode 100644
index 0000000..ab5295e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExtremeIndexing.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.autoscaling.sim;
+
+import java.lang.invoke.MethodHandles;
+import java.util.Iterator;
+import java.util.Locale;
+
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.cloud.CloudTestUtils;
+import org.apache.solr.cloud.autoscaling.ExecutePlanAction;
+import org.apache.solr.common.SolrDocumentList;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.SolrInputField;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.util.LogLevel;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.autoscaling.AutoScalingHandlerTest.createAutoScalingRequest;
+
+/**
+ *
+ */
+@TimeoutSuite(millis = 48 * 3600 * 1000)
+@LogLevel("org.apache.solr.cloud.autoscaling=DEBUG;org.apache.solr.cloud.autoscaling.NodeLostTrigger=INFO;org.apache.client.solrj.cloud.autoscaling=DEBUG;org.apache.solr.cloud.autoscaling.ComputePlanAction=INFO;org.apache.solr.cloud.autoscaling.ExecutePlanAction=DEBUG;org.apache.solr.cloud.autoscaling.ScheduledTriggers=DEBUG")
+//@LogLevel("org.apache.solr.cloud.autoscaling=DEBUG;org.apache.solr.cloud.autoscaling.NodeLostTrigger=INFO;org.apache.client.solrj.cloud.autoscaling=DEBUG;org.apache.solr.cloud.CloudTestUtils=TRACE")
+public class TestSimExtremeIndexing extends SimSolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private static final int SPEED = 100;
+  // use higher speed for larger scale tests
+  // private static final int SPEED = 500;
+  private static final int NUM_NODES = 200;
+
+  private static final long BATCH_SIZE = 200000;
+
+  private static final long NUM_BATCHES = 5000;
+  //  ... or use this for a 1 trillion docs test
+  //  private static final long NUM_BATCHES = 5000000;
+
+  // tweak this threshold to test the number of splits
+  private static final long ABOVE_SIZE = 20000000;
+
+
+  private static TimeSource timeSource;
+  private static SolrClient solrClient;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(NUM_NODES, TimeSource.get("simTime:" + SPEED));
+    timeSource = cluster.getTimeSource();
+    solrClient = cluster.simGetSolrClient();
+    cluster.simSetUseSystemCollection(false);
+  }
+
+  @AfterClass
+  public static void tearDownCluster() throws Exception {
+    solrClient = null;
+  }
+
+  @Test
+  public void testScaleUp() throws Exception {
+    String collectionName = "testScaleUp_collection";
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
+        "conf", 2, 2).setMaxShardsPerNode(10);
+    create.process(solrClient);
+    CloudTestUtils.waitForState(cluster, "failed to create " + collectionName, collectionName,
+        CloudTestUtils.clusterShape(2, 2, false, true));
+
+    //long waitForSeconds = 3 + random().nextInt(5);
+    long waitForSeconds = 1;
+    String setTriggerCommand = "{" +
+        "'set-trigger' : {" +
+        "'name' : 'scaleUpTrigger'," +
+        "'event' : 'indexSize'," +
+        "'waitFor' : '" + waitForSeconds + "s'," +
+        "'aboveDocs' : " + ABOVE_SIZE + "," +
+        "'enabled' : true," +
+        "'actions' : [{'name' : 'compute_plan', 'class' : 'solr.ComputePlanAction'}," +
+        "{'name' : 'execute_plan', 'class' : '" + ExecutePlanAction.class.getName() + "'}]" +
+        "}}";
+    SolrRequest req = createAutoScalingRequest(SolrRequest.METHOD.POST, setTriggerCommand);
+    NamedList<Object> response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    long batchSize = BATCH_SIZE;
+    for (long i = 0; i < NUM_BATCHES; i++) {
+      addDocs(collectionName, i * batchSize, batchSize);
+      log.info(String.format(Locale.ROOT, "#### Total docs so far: %,d", ((i + 1) * batchSize)));
+      timeSource.sleep(waitForSeconds);
+    }
+    timeSource.sleep(60000);
+    QueryResponse rsp = solrClient.query(collectionName, params(CommonParams.Q, "*:*"));
+    SolrDocumentList docs = rsp.getResults();
+    assertNotNull(docs);
+    assertEquals(docs.toString(), batchSize * NUM_BATCHES, docs.getNumFound());
+  }
+
+  private void addDocs(String collection, long start, long count) throws Exception {
+    UpdateRequest ureq = new UpdateRequest();
+    ureq.setParam("collection", collection);
+    ureq.setDocIterator(new FakeDocIterator(start, count));
+    solrClient.request(ureq);
+  }
+
+  // lightweight generator of fake documents
+  // NOTE: this iterator only ever returns the same document, which works ok
+  // for our "index update" simulation. Obviously don't use this for real indexing.
+  private static class FakeDocIterator implements Iterator<SolrInputDocument> {
+    final SolrInputDocument doc = new SolrInputDocument();
+    final SolrInputField idField = new SolrInputField("id");
+
+    final long start, count;
+
+    long current, max;
+
+    FakeDocIterator(long start, long count) {
+      this.start = start;
+      this.count = count;
+      current = start;
+      max = start + count;
+      doc.put("id", idField);
+      idField.setValue("foo");
+    }
+
+    @Override
+    public boolean hasNext() {
+      return current < max;
+    }
+
+    @Override
+    public SolrInputDocument next() {
+      current++;
+      return doc;
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimNodeLostTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimNodeLostTrigger.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimNodeLostTrigger.java
index 4ad0623..c1c5f4c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimNodeLostTrigger.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimNodeLostTrigger.java
@@ -131,7 +131,7 @@ public class TestSimNodeLostTrigger extends SimSolrCloudTestCase {
       trigger.setProcessor(noFirstRunProcessor);
       trigger.run();
 
-      String lostNode = cluster.getSimClusterStateProvider().simGetRandomNode(random());
+      String lostNode = cluster.getSimClusterStateProvider().simGetRandomNode();
       cluster.simRemoveNode(lostNode, false);
       AtomicBoolean fired = new AtomicBoolean(false);
       trigger.setProcessor(event -> {


[18/29] lucene-solr:jira/http2: SOLR-11522: /autoscaling/suggestions now include rebalance options as well even if there are no violations

Posted by da...@apache.org.
SOLR-11522: /autoscaling/suggestions now include rebalance options as well even if there are no violations


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e16d7d69
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e16d7d69
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e16d7d69

Branch: refs/heads/jira/http2
Commit: e16d7d69935030d69abcf414b8ae5866465a9c08
Parents: 2b4717c
Author: Noble Paul <no...@apache.org>
Authored: Wed Sep 26 00:47:03 2018 +1000
Committer: Noble Paul <no...@apache.org>
Committed: Wed Sep 26 00:47:03 2018 +1000

----------------------------------------------------------------------
 solr/solrj/src/java/org/apache/solr/common/MapWriter.java     | 7 ++++++-
 .../solr/client/solrj/cloud/autoscaling/TestPolicy.java       | 2 +-
 .../solr/client/solrj/cloud/autoscaling/TestPolicy2.java      | 4 ++--
 3 files changed, 9 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e16d7d69/solr/solrj/src/java/org/apache/solr/common/MapWriter.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/MapWriter.java b/solr/solrj/src/java/org/apache/solr/common/MapWriter.java
index 4bdc00d..9b1861a 100644
--- a/solr/solrj/src/java/org/apache/solr/common/MapWriter.java
+++ b/solr/solrj/src/java/org/apache/solr/common/MapWriter.java
@@ -80,7 +80,12 @@ public interface MapWriter extends MapSerializable {
 
   void writeMap(EntryWriter ew) throws IOException;
 
-
+  /**Get a child object value using json path
+   *
+   * @param path the full path to that object such as a/b/c[4]/d etc
+   * @param def the default
+   * @return the found value or default
+   */
   default Object _get(String path, Object def) {
     Object v = Utils.getObjectByPath(this, false, path);
     return v == null ? def : v;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e16d7d69/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
index 2b11d0c..9c5be7f 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
@@ -2666,7 +2666,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
     List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(cfg, cloudManagerWithData(dataproviderdata));
     assertEquals(2, suggestions.size());
     for (Suggester.SuggestionInfo suggestion : suggestions) {
-      Utils.getObjectByPath(suggestion, true, "operation/move-replica/targetNode");
+      suggestion._get("operation/move-replica/targetNode", null);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e16d7d69/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
index e902ff9..615fd4d 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
@@ -168,7 +168,7 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
     suggestions = PolicyHelper.getSuggestions(new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson))
         , createCloudManager(state, metaData));
     assertEquals(1, suggestions.size());
-    String repName = (String) Utils.getObjectByPath(suggestions.get(0).operation, true, "command/move-replica/replica");
+    String repName = (String) suggestions.get(0)._get("operation/command/move-replica/replica", null);
 
     AtomicBoolean found = new AtomicBoolean(false);
     session.getNode("node1").forEachReplica(replicaInfo -> {
@@ -372,7 +372,7 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
     assertEquals(2, suggestions.size());
     for (Suggester.SuggestionInfo suggestion : suggestions) {
       assertTrue(ImmutableSet.of("127.0.0.1:63219_solr", "127.0.0.1:63229_solr").contains(
-          Utils.getObjectByPath(suggestion, true, "operation/command/move-replica/targetNode")));
+          suggestion._get("operation/command/move-replica/targetNode", null)));
 
     }
   }


[23/29] lucene-solr:jira/http2: targetNode is not required for move-replica

Posted by da...@apache.org.
targetNode is not required for move-replica


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a6d39ba8
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a6d39ba8
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a6d39ba8

Branch: refs/heads/jira/http2
Commit: a6d39ba859eb81c9359ff9ae1f1683cfd70169b3
Parents: 03c9c04
Author: Noble Paul <no...@apache.org>
Authored: Thu Sep 27 15:25:17 2018 +1000
Committer: Noble Paul <no...@apache.org>
Committed: Thu Sep 27 15:27:08 2018 +1000

----------------------------------------------------------------------
 .../src/resources/apispec/collections.collection.Commands.json    | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6d39ba8/solr/solrj/src/resources/apispec/collections.collection.Commands.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/resources/apispec/collections.collection.Commands.json b/solr/solrj/src/resources/apispec/collections.collection.Commands.json
index b4e87b0..d77cb62 100644
--- a/solr/solrj/src/resources/apispec/collections.collection.Commands.json
+++ b/solr/solrj/src/resources/apispec/collections.collection.Commands.json
@@ -53,8 +53,7 @@
           "default": "true",
           "description": "For replicas that use shared filesystems allow 'in-place' move that reuses shared data."
         }
-      },
-      "required":["targetNode"]
+      }
     },
     "migrate-docs":{
       "type":"object",


[15/29] lucene-solr:jira/http2: SOLR-9317: Deduplicate node list before trying to find if all nodes are up. The test is also fixed to not send duplicate node names in the createNodeSet parameter.

Posted by da...@apache.org.
SOLR-9317: Deduplicate node list before trying to find if all nodes are up. The test is also fixed to not send duplicate node names in the createNodeSet parameter.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e437b2f1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e437b2f1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e437b2f1

Branch: refs/heads/jira/http2
Commit: e437b2f1ed80af6bb8b909f64511fe43829eb67d
Parents: 9bc4b8d
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Tue Sep 25 12:14:16 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Tue Sep 25 12:14:16 2018 +0530

----------------------------------------------------------------------
 .../org/apache/solr/cloud/api/collections/Assign.java     |  4 +++-
 .../src/test/org/apache/solr/cloud/AddReplicaTest.java    | 10 ++++++----
 2 files changed, 9 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e437b2f1/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
index 42de84a..9b33f52 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
@@ -23,6 +23,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -332,7 +333,8 @@ public class Assign {
     if (createNodeSet instanceof List) {
       createNodeList = (List) createNodeSet;
     } else {
-      createNodeList = createNodeSet == null ? null : StrUtils.splitSmart((String) createNodeSet, ",", true);
+      // deduplicate
+      createNodeList = createNodeSet == null ? null : new ArrayList<>(new LinkedHashSet<>(StrUtils.splitSmart((String) createNodeSet, ",", true)));
     }
 
      HashMap<String, ReplicaCount> nodeNameVsShardCount = getNodeNameVsShardCount(collectionName, clusterState, createNodeList);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e437b2f1/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java
index 342a27d..8980ba8 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java
@@ -17,10 +17,9 @@
 package org.apache.solr.cloud;
 
 import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
-import java.util.List;
+import java.util.LinkedHashSet;
 
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -98,9 +97,12 @@ public class AddReplicaTest extends SolrCloudTestCase {
 
     // but adding any number of replicas is supported if an explicit create node set is specified
     // so test that as well
-    List<String> createNodeSet = new ArrayList<>(2);
-    createNodeSet.add(cluster.getRandomJetty(random()).getNodeName());
+    LinkedHashSet<String> createNodeSet = new LinkedHashSet<>(2);
     createNodeSet.add(cluster.getRandomJetty(random()).getNodeName());
+    while (true)  {
+      String nodeName = cluster.getRandomJetty(random()).getNodeName();
+      if (createNodeSet.add(nodeName))  break;
+    }
     addReplica = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
         .setNrtReplicas(3)
         .setTlogReplicas(1)


[10/29] lucene-solr:jira/http2: SOLR-12776: Setting of TMP in solr.cmd causes invisibility of Solr to JDK tools

Posted by da...@apache.org.
SOLR-12776: Setting of TMP in solr.cmd causes invisibility of Solr to JDK tools


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/95cc6f4f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/95cc6f4f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/95cc6f4f

Branch: refs/heads/jira/http2
Commit: 95cc6f4f765f0e1f78e5199868089df1ffec91e9
Parents: e6e3dc7
Author: Erick Erickson <Er...@gmail.com>
Authored: Sun Sep 23 16:03:30 2018 -0700
Committer: Erick Erickson <Er...@gmail.com>
Committed: Sun Sep 23 16:03:30 2018 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt  | 1 +
 solr/bin/solr.cmd | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/95cc6f4f/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 0f97dcf..2c4903b 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -107,6 +107,7 @@ Bug Fixes
 * SOLR-11836: FacetStream works with bucketSizeLimit of -1 which will fetch all the buckets.
   (Alfonso Muñoz-Pomer Fuentes, Amrit Sarkar via Varun Thacker)
 
+* SOLR-12776: Setting of TMP in solr.cmd causes invisibility of Solr to JDK tools (Petr Bodnar via Erick Erickson)
 
 ==================  7.5.0 ==================
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/95cc6f4f/solr/bin/solr.cmd
----------------------------------------------------------------------
diff --git a/solr/bin/solr.cmd b/solr/bin/solr.cmd
index 1750c2f..005ea59 100755
--- a/solr/bin/solr.cmd
+++ b/solr/bin/solr.cmd
@@ -981,8 +981,8 @@ IF [%SOLR_LOGS_DIR%] == [] (
 )
 
 set "EXAMPLE_DIR=%SOLR_TIP%\example"
-set TMP=!SOLR_HOME:%EXAMPLE_DIR%=!
-IF NOT "%TMP%"=="%SOLR_HOME%" (
+set TMP_SOLR_HOME=!SOLR_HOME:%EXAMPLE_DIR%=!
+IF NOT "%TMP_SOLR_HOME%"=="%SOLR_HOME%" (
   set "SOLR_LOGS_DIR=%SOLR_HOME%\..\logs"
   set "LOG4J_CONFIG=file:///%SOLR_SERVER_DIR%\resources\log4j2.xml"
 )


[11/29] lucene-solr:jira/http2: update project DOAP files to the latest 7.5.0 release

Posted by da...@apache.org.
update project DOAP files to the latest 7.5.0 release


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/cecf31ed
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/cecf31ed
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/cecf31ed

Branch: refs/heads/jira/http2
Commit: cecf31ed299d8d3f8311a2c10acc41f2ef1c587d
Parents: 95cc6f4
Author: Jim Ferenczi <ji...@apache.org>
Authored: Mon Sep 24 09:22:32 2018 +0200
Committer: Jim Ferenczi <ji...@apache.org>
Committed: Mon Sep 24 09:22:32 2018 +0200

----------------------------------------------------------------------
 dev-tools/doap/lucene.rdf | 7 +++++++
 dev-tools/doap/solr.rdf   | 7 +++++++
 2 files changed, 14 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/cecf31ed/dev-tools/doap/lucene.rdf
----------------------------------------------------------------------
diff --git a/dev-tools/doap/lucene.rdf b/dev-tools/doap/lucene.rdf
index a4afc8d..6b377b0 100644
--- a/dev-tools/doap/lucene.rdf
+++ b/dev-tools/doap/lucene.rdf
@@ -69,6 +69,13 @@
     <!-- NOTE: please insert releases in numeric order, NOT chronologically. -->
     <release>
       <Version>
+        <name>lucene-7.5.0</name>
+        <created>2018-09-24</created>
+        <revision>7.5.0</revision>
+      </Version>
+    </release>
+    <release>
+      <Version>
         <name>lucene-7.4.0</name>
         <created>2018-06-27</created>
         <revision>7.4.0</revision>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/cecf31ed/dev-tools/doap/solr.rdf
----------------------------------------------------------------------
diff --git a/dev-tools/doap/solr.rdf b/dev-tools/doap/solr.rdf
index c739fb7..92484bd 100644
--- a/dev-tools/doap/solr.rdf
+++ b/dev-tools/doap/solr.rdf
@@ -69,6 +69,13 @@
     <!-- NOTE: please insert releases in numeric order, NOT chronologically. -->
     <release>
       <Version>
+        <name>solr-7.5.0</name>
+        <created>2018-09-24</created>
+        <revision>7.5.0</revision>
+      </Version>
+    </release>
+    <release>
+      <Version>
         <name>solr-7.4.0</name>
         <created>2018-06-27</created>
         <revision>7.4.0</revision>


[28/29] lucene-solr:jira/http2: SOLR-5163: edismax now throws an exception when qf refers to a nonexistent field

Posted by da...@apache.org.
SOLR-5163: edismax now throws an exception when qf refers to a nonexistent field


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9481c1f6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9481c1f6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9481c1f6

Branch: refs/heads/jira/http2
Commit: 9481c1f623b77214a2a14ad18efc59fb406ed765
Parents: 044bc2a
Author: Charles Sanders <sa...@gmail.com>
Authored: Thu Sep 27 15:53:26 2018 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Thu Sep 27 15:53:26 2018 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 +
 .../solr/search/ExtendedDismaxQParser.java      | 89 +++++++++++++++++++-
 .../solr/search/TestExtendedDismaxParser.java   | 33 +++++++-
 3 files changed, 120 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9481c1f6/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 9ef4145..408ab53 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -70,6 +70,8 @@ Other Changes
 * SOLR-12586: Upgrade ParseDateFieldUpdateProcessorFactory (present in "schemaless mode") to use Java 8's
   java.time.DateTimeFormatter instead of Joda time (see upgrade notes).  "Lenient" is enabled.  Removed Joda Time dependency.
   (David Smiley, Bar Rotstein)
+  
+* SOLR-5163: edismax now throws an exception when qf refers to a nonexistent field (Charles Sanders, David Smiley)
 
 * SOLR-12805: Store previous term (generation) of replica when start recovery process (Cao Manh Dat)
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9481c1f6/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java b/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java
index 9d6cd59..3e99d76 100644
--- a/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java
+++ b/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java
@@ -48,6 +48,7 @@ import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.Version;
 import org.apache.solr.analysis.TokenizerChain;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.DisMaxParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -55,6 +56,8 @@ import org.apache.solr.parser.QueryParser;
 import org.apache.solr.parser.SolrQueryParserBase.MagicFieldName;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.IndexSchema;
+import org.apache.solr.search.ExtendedDismaxQParser.ExtendedSolrQueryParser.Alias;
 import org.apache.solr.util.SolrPluginUtils;
 
 import com.google.common.collect.Multimap;
@@ -144,6 +147,7 @@ public class ExtendedDismaxQParser extends QParser {
       ExtendedSolrQueryParser up = createEdismaxQueryParser(this, IMPOSSIBLE_FIELD_NAME);
       up.addAlias(IMPOSSIBLE_FIELD_NAME, config.tiebreaker, config.queryFields);
       addAliasesFromRequest(up, config.tiebreaker);
+      validateQueryFields(up);
       up.setPhraseSlop(config.qslop);     // slop for explicit user phrase queries
       up.setAllowLeadingWildcard(true);
       up.setAllowSubQueryParsing(config.userFields.isAllowed(MagicFieldName.QUERY.field));
@@ -206,6 +210,84 @@ public class ExtendedDismaxQParser extends QParser {
   }
   
   /**
+   * Validate query field names. Must be explicitly defined in the schema or match a dynamic field pattern.
+   * Checks source field(s) represented by a field alias
+   * 
+   * @param up parser used
+   * @throws SyntaxError for invalid field name
+   */
+  protected void validateQueryFields(ExtendedSolrQueryParser up) throws SyntaxError {
+    List<String> flds = new ArrayList<>(config.queryFields.keySet().size());
+    for (String fieldName : config.queryFields.keySet()) {
+      buildQueryFieldList(fieldName, up.getAlias(fieldName), flds, up);
+    }
+    
+    checkFieldsInSchema(flds);
+  }
+  
+  /**
+   * Build list of source (non-alias) query field names. Recursive through aliases.
+   * 
+   * @param fieldName query field name
+   * @param alias field alias
+   * @param flds list of query field names
+   * @param up parser used
+   * @throws SyntaxError for invalid field name
+   */
+  private void buildQueryFieldList(String fieldName, Alias alias, List<String> flds, ExtendedSolrQueryParser up) throws SyntaxError {
+    if (null == alias) {
+        flds.add(fieldName);
+        return;
+    }
+
+    up.validateCyclicAliasing(fieldName);
+    flds.addAll(getFieldsFromAlias(up, alias));
+  }
+  
+  /**
+   * Return list of source (non-alias) field names from an alias
+   * 
+   * @param up parser used
+   * @param a field alias
+   * @return list of source fields
+   * @throws SyntaxError for invalid field name
+   */
+  private List<String> getFieldsFromAlias(ExtendedSolrQueryParser up, Alias a) throws SyntaxError {
+    List<String> lst = new ArrayList<>();
+    for (String s : a.fields.keySet()) {
+      buildQueryFieldList(s, up.getAlias(s), lst, up);
+    }
+
+    return lst;
+  }
+  
+  /**
+   * Verify field name exists in schema, explicit or dynamic field pattern
+   * 
+   * @param fieldName source field name to verify
+   * @throws SyntaxError for invalid field name
+   */
+  private void checkFieldInSchema(String fieldName) throws SyntaxError {
+    try {
+        config.schema.getField(fieldName);
+    } catch (SolrException se) {
+        throw new SyntaxError("Query Field '" + fieldName + "' is not a valid field name", se);
+    }
+  }
+
+  /**
+   * Verify list of source field names
+   * 
+   * @param flds list of source field names to verify
+   * @throws SyntaxError for invalid field name
+   */
+  private void checkFieldsInSchema(List<String> flds) throws SyntaxError {
+    for (String fieldName : flds) {
+        checkFieldInSchema(fieldName);
+    }
+  }
+  
+  /**
    * Adds shingled phrase queries to all the fields specified in the pf, pf2 anf pf3 parameters
    * 
    */
@@ -1597,15 +1679,18 @@ public class ExtendedDismaxQParser extends QParser {
     protected  String[] boostFuncs;
 
     protected boolean splitOnWhitespace;
+    
+    protected IndexSchema schema;
 
     public ExtendedDismaxConfiguration(SolrParams localParams,
         SolrParams params, SolrQueryRequest req) {
       solrParams = SolrParams.wrapDefaults(localParams, params);
-      minShouldMatch = DisMaxQParser.parseMinShouldMatch(req.getSchema(), solrParams); // req.getSearcher() here causes searcher refcount imbalance
+      schema = req.getSchema();
+      minShouldMatch = DisMaxQParser.parseMinShouldMatch(schema, solrParams); // req.getSearcher() here causes searcher refcount imbalance
       final boolean forbidSubQueryByDefault = req.getCore().getSolrConfig().luceneMatchVersion.onOrAfter(Version.LUCENE_7_2_0);
       userFields = new UserFields(U.parseFieldBoosts(solrParams.getParams(DMP.UF)), forbidSubQueryByDefault);
       try {
-        queryFields = DisMaxQParser.parseQueryFields(req.getSchema(), solrParams);  // req.getSearcher() here causes searcher refcount imbalance
+        queryFields = DisMaxQParser.parseQueryFields(schema, solrParams);  // req.getSearcher() here causes searcher refcount imbalance
       } catch (SyntaxError e) {
         throw new RuntimeException(e);
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9481c1f6/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
index f77baaf..ff9a2c4 100644
--- a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
@@ -38,6 +38,7 @@ import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.util.SolrPluginUtils;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.noggit.ObjectBuilder;
@@ -672,7 +673,8 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
       try {
         h.query(req("defType","edismax", "q","blarg", "qf","field1", "f.field1.qf","field2 field3","f.field2.qf","field4 field5", "f.field4.qf","field5", "f.field5.qf","field6", "f.field3.qf","field6"));
       } catch (SolrException e) {
-        fail("This is not cyclic alising");
+        assertFalse("This is not cyclic alising", e.getCause().getMessage().contains("Field aliases lead to a cycle"));
+        assertTrue(e.getCause().getMessage().contains("not a valid field name"));
       }
       
       try {
@@ -683,7 +685,7 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
       }
       
       try {
-        h.query(req("defType","edismax", "q","who:(Zapp Pig)", "qf","field1", "f.who.qf","name","f.name.qf","myalias", "f.myalias.qf","who"));
+        h.query(req("defType","edismax", "q","who:(Zapp Pig)", "qf","text", "f.who.qf","name","f.name.qf","myalias", "f.myalias.qf","who"));
         fail("Cyclic alising not detected");
       } catch (SolrException e) {
         assertTrue(e.getCause().getMessage().contains("Field aliases lead to a cycle"));
@@ -2090,5 +2092,32 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
   public void killInfiniteRecursionParse() throws Exception {
     assertJQ(req("defType", "edismax", "q", "*", "qq", "{!edismax v=something}", "bq", "{!edismax v=$qq}"));
   }
+  
+  /** SOLR-5163 */ 
+  @Test
+  public void testValidateQueryFields() throws Exception {
+    // field aliasing covered by test - testAliasing
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.add("defType", "edismax");
+    params.add("df", "text");
+    params.add("q", "olive AND other");
+    params.add("qf", "subject^3 title");
+    params.add("debugQuery", "true");
+    
+    // test valid field names
+    try (SolrQueryRequest req = req(params)) {
+      String response = h.query(req);
+      response.contains("+DisjunctionMaxQuery((title:olive | (subject:oliv)^3.0)) +DisjunctionMaxQuery((title:other | (subject:other)^3.0))");
+    }
+    
+    // test invalid field name
+    params.set("qf", "subject^3 nosuchfield");
+    try (SolrQueryRequest req = req(params)) {
+      h.query(req);
+    } catch (Exception e) {
+      Assert.assertEquals("org.apache.solr.search.SyntaxError: Query Field 'nosuchfield' is not a valid field name", e.getMessage());
+    }
+    
+  }
 
 }


[20/29] lucene-solr:jira/http2: Fix comment typo in SolrConfig.

Posted by da...@apache.org.
Fix comment typo in SolrConfig.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/58167666
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/58167666
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/58167666

Branch: refs/heads/jira/http2
Commit: 58167666c393c261982ecd7b3d17201a76afda17
Parents: 05f935f
Author: Christine Poerschke <cp...@apache.org>
Authored: Tue Sep 25 17:56:05 2018 +0100
Committer: Christine Poerschke <cp...@apache.org>
Committed: Tue Sep 25 17:56:39 2018 +0100

----------------------------------------------------------------------
 solr/core/src/java/org/apache/solr/core/SolrConfig.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/58167666/solr/core/src/java/org/apache/solr/core/SolrConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrConfig.java b/solr/core/src/java/org/apache/solr/core/SolrConfig.java
index 6bfa08d..fcae44e 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrConfig.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrConfig.java
@@ -382,7 +382,7 @@ public class SolrConfig extends Config implements MapSerializable {
   {
     // non-static setMaxClauseCount because the test framework sometimes reverts the value on us and
     // the static setting above is only executed once.  This re-sets the value every time a SolrConfig
-    // obect is created. See SOLR-10921
+    // object is created. See SOLR-10921
     BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE-1);
   }
 


[27/29] lucene-solr:jira/http2: SOLR-12652: Remove SolrMetricManager.overridableRegistryName()

Posted by da...@apache.org.
SOLR-12652: Remove SolrMetricManager.overridableRegistryName()


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/044bc2a4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/044bc2a4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/044bc2a4

Branch: refs/heads/jira/http2
Commit: 044bc2a48522cb9d1e112aa3be4f2d7e6c2ed498
Parents: 2369c89
Author: Peter Somogyi <ps...@apache.org>
Authored: Thu Sep 27 15:39:55 2018 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Thu Sep 27 15:39:55 2018 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 +
 .../solr/handler/admin/MetricsHandler.java      |  4 +-
 .../handler/admin/MetricsHistoryHandler.java    |  4 +-
 .../apache/solr/metrics/SolrMetricManager.java  | 65 ++++++--------------
 .../metrics/reporters/SolrSlf4jReporter.java    |  4 +-
 .../reporters/solr/SolrClusterReporter.java     |  8 +--
 .../solr/metrics/SolrMetricManagerTest.java     | 14 -----
 7 files changed, 32 insertions(+), 69 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/044bc2a4/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 914fa7c..9ef4145 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -73,6 +73,8 @@ Other Changes
 
 * SOLR-12805: Store previous term (generation) of replica when start recovery process (Cao Manh Dat)
 
+* SOLR-12652: Remove SolrMetricManager.overridableRegistryName method (Peter Somogyi via David Smiley)
+
 ==================  7.6.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/044bc2a4/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
index 1f1a820..752e021 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
@@ -257,7 +257,7 @@ public class MetricsHandler extends RequestHandlerBase implements PermissionName
             allRegistries = true;
             break;
           }
-          initialPrefixes.add(SolrMetricManager.overridableRegistryName(s.trim()));
+          initialPrefixes.add(SolrMetricManager.enforcePrefix(s.trim()));
         }
         if (allRegistries) {
           return metricManager.registryNames();
@@ -276,7 +276,7 @@ public class MetricsHandler extends RequestHandlerBase implements PermissionName
             allRegistries = true;
             break;
           }
-          initialPrefixes.add(SolrMetricManager.overridableRegistryName(s.trim()));
+          initialPrefixes.add(SolrMetricManager.enforcePrefix(s.trim()));
         }
         if (allRegistries) {
           return metricManager.registryNames();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/044bc2a4/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java
index 1c74dba..b569fe8 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHistoryHandler.java
@@ -297,7 +297,7 @@ public class MetricsHistoryHandler extends RequestHandlerBase implements Permiss
   }
 
   public void removeHistory(String registry) throws IOException {
-    registry = SolrMetricManager.overridableRegistryName(registry);
+    registry = SolrMetricManager.enforcePrefix(registry);
     knownDbs.remove(registry);
     factory.remove(registry);
   }
@@ -586,7 +586,7 @@ public class MetricsHistoryHandler extends RequestHandlerBase implements Permiss
   }
 
   private RrdDef createDef(String registry, Group group) {
-    registry = SolrMetricManager.overridableRegistryName(registry);
+    registry = SolrMetricManager.enforcePrefix(registry);
 
     // base sampling period is collectPeriod - samples more frequent than
     // that will be dropped, samples less frequent will be interpolated

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/044bc2a4/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java b/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java
index e9cb111..6e01204 100644
--- a/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java
+++ b/solr/core/src/java/org/apache/solr/metrics/SolrMetricManager.java
@@ -354,13 +354,13 @@ public class SolrMetricManager {
   }
 
   /**
-   * Check whether a registry with a given (overridable) name already exists.
+   * Check whether a registry with a given name already exists.
    * @param name registry name
    * @return true if this name points to a registry that already exists, false otherwise
    */
   public boolean hasRegistry(String name) {
     Set<String> names = registryNames();
-    name = overridableRegistryName(name);
+    name = enforcePrefix(name);
     return names.contains(name);
   }
 
@@ -399,18 +399,13 @@ public class SolrMetricManager {
 
   /**
    * Check for predefined shared registry names. This compares the input name
-   * with normalized and possibly overriden names of predefined shared registries -
+   * with normalized names of predefined shared registries -
    * {@link #JVM_REGISTRY} and {@link #JETTY_REGISTRY}.
-   * @param registry already normalized and possibly overriden name
+   * @param registry already normalized name
    * @return true if the name matches one of shared registries
    */
   private static boolean isSharedRegistry(String registry) {
-    if (overridableRegistryName(JETTY_REGISTRY).equals(registry) ||
-        overridableRegistryName(JVM_REGISTRY).equals(registry)) {
-      return true;
-    } else {
-      return false;
-    }
+    return JETTY_REGISTRY.equals(registry) || JVM_REGISTRY.equals(registry);
   }
 
   /**
@@ -419,7 +414,7 @@ public class SolrMetricManager {
    * @return existing or newly created registry
    */
   public MetricRegistry registry(String registry) {
-    registry = overridableRegistryName(registry);
+    registry = enforcePrefix(registry);
     if (isSharedRegistry(registry)) {
       return SharedMetricRegistries.getOrCreate(registry);
     } else {
@@ -454,8 +449,8 @@ public class SolrMetricManager {
   public void removeRegistry(String registry) {
     // close any reporters for this registry first
     closeReporters(registry, null);
-    // make sure we use a name with prefix, with overrides
-    registry = overridableRegistryName(registry);
+    // make sure we use a name with prefix
+    registry = enforcePrefix(registry);
     if (isSharedRegistry(registry)) {
       SharedMetricRegistries.remove(registry);
     } else {
@@ -478,8 +473,8 @@ public class SolrMetricManager {
    *                  an empty one under the previous name.
    */
   public void swapRegistries(String registry1, String registry2) {
-    registry1 = overridableRegistryName(registry1);
-    registry2 = overridableRegistryName(registry2);
+    registry1 = enforcePrefix(registry1);
+    registry2 = enforcePrefix(registry2);
     if (isSharedRegistry(registry1) || isSharedRegistry(registry2)) {
       throw new UnsupportedOperationException("Cannot swap shared registry: " + registry1 + ", " + registry2);
     }
@@ -748,26 +743,6 @@ public class SolrMetricManager {
   }
 
   /**
-   * Allows named registries to be renamed using System properties.
-   * This would be mostly be useful if you want to combine the metrics from a few registries for a single
-   * reporter.
-   * <p>For example, in order to collect metrics from related cores in a single registry you could specify
-   * the following system properties:</p>
-   * <pre>
-   *   ... -Dsolr.core.collection1=solr.core.allCollections -Dsolr.core.collection2=solr.core.allCollections
-   * </pre>
-   * <b>NOTE:</b> Once a registry is renamed in a way that its metrics are combined with another repository
-   * it is no longer possible to retrieve the original metrics until this renaming is removed and the Solr
-   * {@link org.apache.solr.core.SolrInfoBean.Group} of components that reported to that name is restarted.
-   * @param registry The name of the registry
-   * @return A potentially overridden (via System properties) registry name
-   */
-  public static String overridableRegistryName(String registry) {
-    String fqRegistry = enforcePrefix(registry);
-    return enforcePrefix(System.getProperty(fqRegistry,fqRegistry));
-  }
-
-  /**
    * Enforces the leading {@link #REGISTRY_NAME_PREFIX} in a name.
    * @param name input name, possibly without the prefix
    * @return original name if it contained the prefix, or the
@@ -805,7 +780,7 @@ public class SolrMetricManager {
     } else {
       fullName = MetricRegistry.name(group.toString(), names);
     }
-    return overridableRegistryName(fullName);
+    return enforcePrefix(fullName);
   }
 
   // reporter management
@@ -838,7 +813,7 @@ public class SolrMetricManager {
           String[] targets = target.split("[\\s,]+");
           boolean found = false;
           for (String t : targets) {
-            t = overridableRegistryName(t);
+            t = enforcePrefix(t);
             if (registryName.equals(t)) {
               found = true;
               break;
@@ -914,8 +889,8 @@ public class SolrMetricManager {
       throw new IllegalArgumentException("loadReporter called with missing arguments: " +
           "registry=" + registry + ", loader=" + loader + ", pluginInfo=" + pluginInfo);
     }
-    // make sure we use a name with prefix, with overrides
-    registry = overridableRegistryName(registry);
+    // make sure we use a name with prefix
+    registry = enforcePrefix(registry);
     SolrMetricReporter reporter = loader.newInstance(
         pluginInfo.className,
         SolrMetricReporter.class,
@@ -987,8 +962,8 @@ public class SolrMetricManager {
    * @return true if a named reporter existed and was closed.
    */
   public boolean closeReporter(String registry, String name, String tag) {
-    // make sure we use a name with prefix, with overrides
-    registry = overridableRegistryName(registry);
+    // make sure we use a name with prefix
+    registry = enforcePrefix(registry);
     try {
       if (!reportersLock.tryLock(10, TimeUnit.SECONDS)) {
         log.warn("Could not obtain lock to modify reporters registry: " + registry);
@@ -1038,8 +1013,8 @@ public class SolrMetricManager {
    * @return names of closed reporters
    */
   public Set<String> closeReporters(String registry, String tag) {
-    // make sure we use a name with prefix, with overrides
-    registry = overridableRegistryName(registry);
+    // make sure we use a name with prefix
+    registry = enforcePrefix(registry);
     try {
       if (!reportersLock.tryLock(10, TimeUnit.SECONDS)) {
         log.warn("Could not obtain lock to modify reporters registry: " + registry);
@@ -1085,8 +1060,8 @@ public class SolrMetricManager {
    * @return map of reporters and their names, may be empty but never null
    */
   public Map<String, SolrMetricReporter> getReporters(String registry) {
-    // make sure we use a name with prefix, with overrides
-    registry = overridableRegistryName(registry);
+    // make sure we use a name with prefix
+    registry = enforcePrefix(registry);
     try {
       if (!reportersLock.tryLock(10, TimeUnit.SECONDS)) {
         log.warn("Could not obtain lock to modify reporters registry: " + registry);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/044bc2a4/solr/core/src/java/org/apache/solr/metrics/reporters/SolrSlf4jReporter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrSlf4jReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrSlf4jReporter.java
index ff00a00..e65d119 100644
--- a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrSlf4jReporter.java
+++ b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrSlf4jReporter.java
@@ -131,9 +131,9 @@ public class SolrSlf4jReporter extends FilteringSolrMetricReporter {
     if (logger == null || logger.isEmpty()) {
       // construct logger name from Group
       if (pluginInfo.attributes.containsKey("group")) {
-        logger = SolrMetricManager.overridableRegistryName(pluginInfo.attributes.get("group"));
+        logger = SolrMetricManager.enforcePrefix(pluginInfo.attributes.get("group"));
       } else if (pluginInfo.attributes.containsKey("registry")) {
-        String reg = SolrMetricManager.overridableRegistryName(pluginInfo.attributes.get("registry"));
+        String reg = SolrMetricManager.enforcePrefix(pluginInfo.attributes.get("registry"));
         String[] names = reg.split("\\.");
         if (names.length < 2) {
           logger = reg;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/044bc2a4/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrClusterReporter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrClusterReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrClusterReporter.java
index 17390e1..c1b424f 100644
--- a/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrClusterReporter.java
+++ b/solr/core/src/java/org/apache/solr/metrics/reporters/solr/SolrClusterReporter.java
@@ -94,14 +94,14 @@ import static org.apache.solr.common.params.CommonParams.ID;
 public class SolrClusterReporter extends SolrCoreContainerReporter {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  public static final String CLUSTER_GROUP = SolrMetricManager.overridableRegistryName(SolrInfoBean.Group.cluster.toString());
+  public static final String CLUSTER_GROUP = SolrMetricManager.enforcePrefix(SolrInfoBean.Group.cluster.toString());
 
   public static final List<SolrReporter.Report> DEFAULT_REPORTS = new ArrayList<SolrReporter.Report>() {{
     add(new SolrReporter.Report(CLUSTER_GROUP, "jetty",
-        SolrMetricManager.overridableRegistryName(SolrInfoBean.Group.jetty.toString()),
+        SolrMetricManager.enforcePrefix(SolrInfoBean.Group.jetty.toString()),
         Collections.emptySet())); // all metrics
     add(new SolrReporter.Report(CLUSTER_GROUP, "jvm",
-        SolrMetricManager.overridableRegistryName(SolrInfoBean.Group.jvm.toString()),
+        SolrMetricManager.enforcePrefix(SolrInfoBean.Group.jvm.toString()),
         new HashSet<String>() {{
           add("memory\\.total\\..*");
           add("memory\\.heap\\..*");
@@ -111,7 +111,7 @@ public class SolrClusterReporter extends SolrCoreContainerReporter {
           add("os\\.OpenFileDescriptorCount");
           add("threads\\.count");
         }}));
-    add(new SolrReporter.Report(CLUSTER_GROUP, "node", SolrMetricManager.overridableRegistryName(SolrInfoBean.Group.node.toString()),
+    add(new SolrReporter.Report(CLUSTER_GROUP, "node", SolrMetricManager.enforcePrefix(SolrInfoBean.Group.node.toString()),
         new HashSet<String>() {{
           add("CONTAINER\\.cores\\..*");
           add("CONTAINER\\.fs\\..*");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/044bc2a4/solr/core/src/test/org/apache/solr/metrics/SolrMetricManagerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/metrics/SolrMetricManagerTest.java b/solr/core/src/test/org/apache/solr/metrics/SolrMetricManagerTest.java
index 9e15acf..e95b73e 100644
--- a/solr/core/src/test/org/apache/solr/metrics/SolrMetricManagerTest.java
+++ b/solr/core/src/test/org/apache/solr/metrics/SolrMetricManagerTest.java
@@ -37,20 +37,6 @@ import org.junit.Test;
 public class SolrMetricManagerTest extends SolrTestCaseJ4 {
 
   @Test
-  public void testOverridableRegistryName() throws Exception {
-    Random r = random();
-    String originalName = TestUtil.randomSimpleString(r, 1, 10);
-    String targetName = TestUtil.randomSimpleString(r, 1, 10);
-    // no override
-    String result = SolrMetricManager.overridableRegistryName(originalName);
-    assertEquals(SolrMetricManager.REGISTRY_NAME_PREFIX + originalName, result);
-    // with override
-    System.setProperty(SolrMetricManager.REGISTRY_NAME_PREFIX + originalName, targetName);
-    result = SolrMetricManager.overridableRegistryName(originalName);
-    assertEquals(SolrMetricManager.REGISTRY_NAME_PREFIX + targetName, result);
-  }
-
-  @Test
   public void testSwapRegistries() throws Exception {
     Random r = random();
 


[24/29] lucene-solr:jira/http2: SOLR-12756: Refactor Assign and extract replica placement strategies out of it.

Posted by da...@apache.org.
SOLR-12756: Refactor Assign and extract replica placement strategies out of it.

Now, assignment is done with the help of a builder class instead of calling a method with large number of arguments. The number of special cases that had to be handled have been cut down as well.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/c587410f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/c587410f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/c587410f

Branch: refs/heads/jira/http2
Commit: c587410f99375005c680ece5e24a4dfd40d8d3eb
Parents: a6d39ba
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Thu Sep 27 16:15:38 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Thu Sep 27 16:15:38 2018 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   4 +
 .../cloud/api/collections/AddReplicaCmd.java    |  45 +--
 .../solr/cloud/api/collections/Assign.java      | 358 ++++++++++++-------
 .../api/collections/CreateCollectionCmd.java    |  34 +-
 .../cloud/api/collections/ReplaceNodeCmd.java   |  33 +-
 .../solr/cloud/api/collections/RestoreCmd.java  |  16 +-
 .../cloud/api/collections/SplitShardCmd.java    |  17 +-
 .../solr/cloud/overseer/ReplicaMutator.java     |   4 +-
 .../CollectionTooManyReplicasTest.java          |   8 +-
 .../solr/cloud/autoscaling/TestPolicyCloud.java |   1 +
 .../sim/SimClusterStateProvider.java            |  24 +-
 .../autoscaling/sim/TestSimPolicyCloud.java     |   1 +
 .../solr/common/cloud/ReplicaPosition.java      |   2 +-
 13 files changed, 331 insertions(+), 216 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 98fb204..914fa7c 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -105,6 +105,10 @@ Other Changes
 * SOLR-12762: Fix javadoc for SolrCloudTestCase.clusterShape() method and add a method that validates only against
   Active slices (Anshum Gupta)
 
+* SOLR-12756: Refactor Assign and extract replica placement strategies out of it. Now, assignment is done with the help
+  of a builder class instead of calling a method with large number of arguments. The number of special cases that had
+  to be handled have been cut down as well. (shalin)
+
 Bug Fixes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
index f128c2e..6e851db 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
@@ -34,10 +34,8 @@ import java.util.stream.Collectors;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
 import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
 import org.apache.solr.cloud.ActiveReplicaWatcher;
-import org.apache.solr.cloud.CloudUtil;
 import org.apache.solr.cloud.Overseer;
 import org.apache.solr.common.SolrCloseableLatch;
 import org.apache.solr.common.SolrException;
@@ -76,6 +74,12 @@ import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STA
 public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
+  /**
+   * When AddReplica is called with this set to true, then we do not try to find node assignments
+   * for the add replica API. If set to true, a valid "node" should be specified.
+   */
+  public static final String SKIP_NODE_ASSIGNMENT = "skipNodeAssignment";
+
   private final OverseerCollectionMessageHandler ocmh;
 
   public AddReplicaCmd(OverseerCollectionMessageHandler ocmh) {
@@ -213,6 +217,8 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
             ZkStateReader.COLLECTION_PROP, withCollectionName,
             ZkStateReader.SHARD_ID_PROP, withCollectionShard,
             "node", createReplica.node,
+            // since we already computed node assignments (which include assigning a node for this withCollection replica) we want to skip the assignment step
+            SKIP_NODE_ASSIGNMENT, "true",
             CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.TRUE.toString()); // set to true because we want `withCollection` to be ready after this collection is created
         addReplica(clusterState, props, results, null);
       }
@@ -300,7 +306,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
       coreName = message.getStr(CoreAdminParams.PROPERTY_PREFIX + CoreAdminParams.NAME);
     }
 
-    log.info("Node Identified {} for creating new replica of shard {}", node, shard);
+    log.info("Node Identified {} for creating new replica of shard {} for collection {}", node, shard, collection);
     if (!clusterState.liveNodesContain(node)) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
     }
@@ -327,6 +333,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
                                                             EnumMap<Replica.Type, Integer> replicaTypeVsCount,
                                                             AtomicReference< PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
     boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
+    boolean skipNodeAssignment = message.getBool(SKIP_NODE_ASSIGNMENT, false);
     String sliceName = message.getStr(SHARD_ID_PROP);
     DocCollection collection = clusterState.getCollection(collectionName);
 
@@ -345,33 +352,11 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
     }
 
     List<ReplicaPosition> positions = null;
-    if (!skipCreateReplicaInClusterState) {
-      if (CloudUtil.usePolicyFramework(collection, cloudManager)) {
-        if (node == null) {
-          if (collection.getPolicyName() != null) message.getProperties().put(Policy.POLICY, collection.getPolicyName());
-          positions = Assign.identifyNodes(cloudManager,
-              clusterState,
-              Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM),
-              collection.getName(),
-              message,
-              Collections.singletonList(sliceName),
-              numNrtReplicas,
-              numTlogReplicas,
-              numPullReplicas);
-          sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
-        }
-      } else {
-        List<Assign.ReplicaCount> sortedNodeList = Assign.getNodesForNewReplicas(clusterState, collection.getName(), sliceName, numNrtReplicas,
-            numTlogReplicas, numPullReplicas, createNodeSetStr, cloudManager);
-        int i = 0;
-        positions = new ArrayList<>();
-        for (Map.Entry<Replica.Type, Integer> e : replicaTypeVsCount.entrySet()) {
-          for (int j = 0; j < e.getValue(); j++) {
-            positions.add(new ReplicaPosition(sliceName, j + 1, e.getKey(), sortedNodeList.get(i % sortedNodeList.size()).nodeName));
-            i++;
-          }
-        }
-      }
+    if (!skipCreateReplicaInClusterState && !skipNodeAssignment) {
+
+      positions = Assign.getNodesForNewReplicas(clusterState, collection.getName(), sliceName, numNrtReplicas,
+                    numTlogReplicas, numPullReplicas, createNodeSetStr, cloudManager);
+      sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
     }
 
     if (positions == null)  {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
index 9b33f52..542ca1b 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
@@ -21,12 +21,14 @@ import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Random;
 import java.util.Set;
 import java.util.stream.Collectors;
@@ -217,12 +219,7 @@ public class Assign {
   }
 
   public static List<String> getLiveOrLiveAndCreateNodeSetList(final Set<String> liveNodes, final ZkNodeProps message, final Random random) {
-    // TODO: add smarter options that look at the current number of cores per
-    // node?
-    // for now we just go random (except when createNodeSet and createNodeSet.shuffle=false are passed in)
-
     List<String> nodeList;
-
     final String createNodeSetStr = message.getStr(CREATE_NODE_SET);
     final List<String> createNodeList = (createNodeSetStr == null) ? null :
         StrUtils.splitSmart((OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY.equals(createNodeSetStr) ?
@@ -243,67 +240,6 @@ public class Assign {
     return nodeList;
   }
 
-  public static List<ReplicaPosition> identifyNodes(SolrCloudManager cloudManager,
-                                                    ClusterState clusterState,
-                                                    List<String> nodeList,
-                                                    String collectionName,
-                                                    ZkNodeProps message,
-                                                    List<String> shardNames,
-                                                    int numNrtReplicas,
-                                                    int numTlogReplicas,
-                                                    int numPullReplicas) throws IOException, InterruptedException, AssignmentException {
-    List<Map> rulesMap = (List) message.get("rule");
-    String policyName = message.getStr(POLICY);
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-
-    if (rulesMap == null && policyName == null && autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
-      log.debug("Identify nodes using default");
-      int i = 0;
-      List<ReplicaPosition> result = new ArrayList<>();
-      for (String aShard : shardNames)
-        for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
-            Replica.Type.TLOG, numTlogReplicas,
-            Replica.Type.PULL, numPullReplicas
-        ).entrySet()) {
-          for (int j = 0; j < e.getValue(); j++){
-            result.add(new ReplicaPosition(aShard, j, e.getKey(), nodeList.get(i % nodeList.size())));
-            i++;
-          }
-        }
-      return result;
-    } else {
-      if (numTlogReplicas + numPullReplicas != 0 && rulesMap != null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            Replica.Type.TLOG + " or " + Replica.Type.PULL + " replica types not supported with placement rules");
-      }
-    }
-
-    if (rulesMap != null && !rulesMap.isEmpty()) {
-      List<Rule> rules = new ArrayList<>();
-      for (Object map : rulesMap) rules.add(new Rule((Map) map));
-      Map<String, Integer> sharVsReplicaCount = new HashMap<>();
-
-      for (String shard : shardNames) sharVsReplicaCount.put(shard, numNrtReplicas);
-      ReplicaAssigner replicaAssigner = new ReplicaAssigner(rules,
-          sharVsReplicaCount,
-          (List<Map>) message.get(SNITCH),
-          new HashMap<>(),//this is a new collection. So, there are no nodes in any shard
-          nodeList,
-          cloudManager,
-          clusterState);
-
-      Map<ReplicaPosition, String> nodeMappings = replicaAssigner.getNodeMappings();
-      return nodeMappings.entrySet().stream()
-          .map(e -> new ReplicaPosition(e.getKey().shard, e.getKey().index, e.getKey().type, e.getValue()))
-          .collect(Collectors.toList());
-    } else  {
-      if (message.getStr(CREATE_NODE_SET) == null)
-        nodeList = Collections.emptyList();// unless explicitly specified do not pass node list to Policy
-      return getPositionsUsingPolicy(collectionName,
-          shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas, policyName, cloudManager, nodeList);
-    }
-  }
-
   static class ReplicaCount {
     public final String nodeName;
     public int thisCollectionNodes = 0;
@@ -318,11 +254,11 @@ public class Assign {
     }
   }
 
-  // Only called from createShard and addReplica (so far).
+  // Only called from addReplica (and by extension createShard) (so far).
   //
   // Gets a list of candidate nodes to put the required replica(s) on. Throws errors if not enough replicas
   // could be created on live nodes given maxShardsPerNode, Replication factor (if from createShard) etc.
-  public static List<ReplicaCount> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
+  public static List<ReplicaPosition> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
                                                           String shard, int nrtReplicas, int tlogReplicas, int pullReplicas,
                                                           Object createNodeSet, SolrCloudManager cloudManager) throws IOException, InterruptedException, AssignmentException {
     log.debug("getNodesForNewReplicas() shard: {} , nrtReplicas : {} , tlogReplicas: {} , pullReplicas: {} , createNodeSet {}", shard, nrtReplicas, tlogReplicas, pullReplicas, createNodeSet );
@@ -331,13 +267,13 @@ public class Assign {
     List<String> createNodeList = null;
 
     if (createNodeSet instanceof List) {
-      createNodeList = (List) createNodeSet;
+      createNodeList = (List<String>) createNodeSet;
     } else {
       // deduplicate
       createNodeList = createNodeSet == null ? null : new ArrayList<>(new LinkedHashSet<>(StrUtils.splitSmart((String) createNodeSet, ",", true)));
     }
 
-     HashMap<String, ReplicaCount> nodeNameVsShardCount = getNodeNameVsShardCount(collectionName, clusterState, createNodeList);
+    HashMap<String, ReplicaCount> nodeNameVsShardCount = getNodeNameVsShardCount(collectionName, clusterState, createNodeList);
 
     if (createNodeList == null) { // We only care if we haven't been told to put new replicas on specific nodes.
       long availableSlots = 0;
@@ -349,40 +285,22 @@ public class Assign {
       }
       if (availableSlots < nrtReplicas + tlogReplicas + pullReplicas) {
         throw new AssignmentException(
-            String.format(Locale.ROOT, "Cannot create %d new replicas for collection %s given the current number of live nodes and a maxShardsPerNode of %d",
-                nrtReplicas, collectionName, maxShardsPerNode));
+            String.format(Locale.ROOT, "Cannot create %d new replicas for collection %s given the current number of eligible live nodes %d and a maxShardsPerNode of %d",
+                nrtReplicas, collectionName, nodeNameVsShardCount.size(), maxShardsPerNode));
       }
     }
 
-    List l = (List) coll.get(DocCollection.RULE);
-    List<ReplicaPosition> replicaPositions = null;
-    if (l != null) {
-      if (tlogReplicas + pullReplicas > 0)  {
-        throw new AssignmentException(Replica.Type.TLOG + " or " + Replica.Type.PULL +
-            " replica types not supported with placement rules");
-      }
-      // TODO: make it so that this method doesn't require access to CC
-      replicaPositions = getNodesViaRules(clusterState, shard, nrtReplicas, cloudManager, coll, createNodeList, l);
-    }
-    String policyName = coll.getStr(POLICY);
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    if (policyName != null || !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
-      replicaPositions = Assign.getPositionsUsingPolicy(collectionName, Collections.singletonList(shard), nrtReplicas, tlogReplicas, pullReplicas,
-          policyName, cloudManager, createNodeList);
-    }
-
-    if(replicaPositions != null){
-      List<ReplicaCount> repCounts = new ArrayList<>();
-      for (ReplicaPosition p : replicaPositions) {
-        repCounts.add(new ReplicaCount(p.node));
-      }
-      return repCounts;
-    }
-
-    ArrayList<ReplicaCount> sortedNodeList = new ArrayList<>(nodeNameVsShardCount.values());
-    Collections.sort(sortedNodeList, (x, y) -> (x.weight() < y.weight()) ? -1 : ((x.weight() == y.weight()) ? 0 : 1));
-    return sortedNodeList;
-
+    AssignRequest assignRequest = new AssignRequestBuilder()
+        .forCollection(collectionName)
+        .forShard(Collections.singletonList(shard))
+        .assignNrtReplicas(nrtReplicas)
+        .assignTlogReplicas(tlogReplicas)
+        .assignPullReplicas(pullReplicas)
+        .onNodes(createNodeList)
+        .build();
+    AssignStrategyFactory assignStrategyFactory = new AssignStrategyFactory(cloudManager);
+    AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, coll);
+    return assignStrategy.assign(cloudManager, assignRequest);
   }
 
   public static List<ReplicaPosition> getPositionsUsingPolicy(String collName, List<String> shardNames,
@@ -418,35 +336,7 @@ public class Assign {
     }
   }
 
-  private static List<ReplicaPosition> getNodesViaRules(ClusterState clusterState, String shard, int numberOfNodes,
-                                                        SolrCloudManager cloudManager, DocCollection coll, List<String> createNodeList, List l) {
-    ArrayList<Rule> rules = new ArrayList<>();
-    for (Object o : l) rules.add(new Rule((Map) o));
-    Map<String, Map<String, Integer>> shardVsNodes = new LinkedHashMap<>();
-    for (Slice slice : coll.getSlices()) {
-      LinkedHashMap<String, Integer> n = new LinkedHashMap<>();
-      shardVsNodes.put(slice.getName(), n);
-      for (Replica replica : slice.getReplicas()) {
-        Integer count = n.get(replica.getNodeName());
-        if (count == null) count = 0;
-        n.put(replica.getNodeName(), ++count);
-      }
-    }
-    List snitches = (List) coll.get(SNITCH);
-    List<String> nodesList = createNodeList == null ?
-        new ArrayList<>(clusterState.getLiveNodes()) :
-        createNodeList;
-    Map<ReplicaPosition, String> positions = new ReplicaAssigner(
-        rules,
-        Collections.singletonMap(shard, numberOfNodes),
-        snitches,
-        shardVsNodes,
-        nodesList, cloudManager, clusterState).getNodeMappings();
-
-    return positions.entrySet().stream().map(e -> e.getKey().setNode(e.getValue())).collect(Collectors.toList());// getReplicaCounts(positions);
-  }
-
-  private static HashMap<String, ReplicaCount> getNodeNameVsShardCount(String collectionName,
+  static HashMap<String, ReplicaCount> getNodeNameVsShardCount(String collectionName,
                                                                        ClusterState clusterState, List<String> createNodeList) {
     Set<String> nodes = clusterState.getLiveNodes();
 
@@ -477,7 +367,7 @@ public class Assign {
         for (Replica replica : replicas) {
           ReplicaCount count = nodeNameVsShardCount.get(replica.getNodeName());
           if (count != null) {
-            count.totalNodes++; // Used ot "weigh" whether this node should be used later.
+            count.totalNodes++; // Used to "weigh" whether this node should be used later.
             if (entry.getKey().equals(collectionName)) {
               count.thisCollectionNodes++;
               if (count.thisCollectionNodes >= maxShardsPerNode) nodeNameVsShardCount.remove(replica.getNodeName());
@@ -513,4 +403,210 @@ public class Assign {
       super(message, cause, enableSuppression, writableStackTrace);
     }
   }
+
+  public interface AssignStrategy {
+    List<ReplicaPosition> assign(SolrCloudManager solrCloudManager, AssignRequest assignRequest)
+        throws Assign.AssignmentException, IOException, InterruptedException;
+  }
+
+  public static class AssignRequest {
+    public String collectionName;
+    public List<String> shardNames;
+    public List<String> nodes;
+    public int numNrtReplicas;
+    public int numTlogReplicas;
+    public int numPullReplicas;
+
+    public AssignRequest(String collectionName, List<String> shardNames, List<String> nodes, int numNrtReplicas, int numTlogReplicas, int numPullReplicas) {
+      this.collectionName = collectionName;
+      this.shardNames = shardNames;
+      this.nodes = nodes;
+      this.numNrtReplicas = numNrtReplicas;
+      this.numTlogReplicas = numTlogReplicas;
+      this.numPullReplicas = numPullReplicas;
+    }
+  }
+
+  public static class AssignRequestBuilder {
+    private String collectionName;
+    private List<String> shardNames;
+    private List<String> nodes;
+    private int numNrtReplicas;
+    private int numTlogReplicas;
+    private int numPullReplicas;
+
+    public AssignRequestBuilder forCollection(String collectionName) {
+      this.collectionName = collectionName;
+      return this;
+    }
+
+    public AssignRequestBuilder forShard(List<String> shardNames) {
+      this.shardNames = shardNames;
+      return this;
+    }
+
+    public AssignRequestBuilder onNodes(List<String> nodes) {
+      this.nodes = nodes;
+      return this;
+    }
+
+    public AssignRequestBuilder assignNrtReplicas(int numNrtReplicas) {
+      this.numNrtReplicas = numNrtReplicas;
+      return this;
+    }
+
+    public AssignRequestBuilder assignTlogReplicas(int numTlogReplicas) {
+      this.numTlogReplicas = numTlogReplicas;
+      return this;
+    }
+
+    public AssignRequestBuilder assignPullReplicas(int numPullReplicas) {
+      this.numPullReplicas = numPullReplicas;
+      return this;
+    }
+
+    public AssignRequest build() {
+      Objects.requireNonNull(collectionName, "The collectionName cannot be null");
+      Objects.requireNonNull(shardNames, "The shard names cannot be null");
+      return new AssignRequest(collectionName, shardNames, nodes, numNrtReplicas,
+          numTlogReplicas, numPullReplicas);
+    }
+  }
+
+  public static class LegacyAssignStrategy implements AssignStrategy {
+    @Override
+    public List<ReplicaPosition> assign(SolrCloudManager solrCloudManager, AssignRequest assignRequest) throws Assign.AssignmentException, IOException, InterruptedException {
+      ClusterState clusterState = solrCloudManager.getClusterStateProvider().getClusterState();
+      List<String> nodeList = assignRequest.nodes;
+
+      HashMap<String, Assign.ReplicaCount> nodeNameVsShardCount = Assign.getNodeNameVsShardCount(assignRequest.collectionName, clusterState, assignRequest.nodes);
+      if (nodeList == null || nodeList.isEmpty()) {
+        ArrayList<Assign.ReplicaCount> sortedNodeList = new ArrayList<>(nodeNameVsShardCount.values());
+        sortedNodeList.sort(Comparator.comparingInt(Assign.ReplicaCount::weight));
+        nodeList = sortedNodeList.stream().map(replicaCount -> replicaCount.nodeName).collect(Collectors.toList());
+      }
+
+      int i = 0;
+      List<ReplicaPosition> result = new ArrayList<>();
+      for (String aShard : assignRequest.shardNames)
+        for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, assignRequest.numNrtReplicas,
+            Replica.Type.TLOG, assignRequest.numTlogReplicas,
+            Replica.Type.PULL, assignRequest.numPullReplicas
+        ).entrySet()) {
+          for (int j = 0; j < e.getValue(); j++) {
+            result.add(new ReplicaPosition(aShard, j, e.getKey(), nodeList.get(i % nodeList.size())));
+            i++;
+          }
+        }
+      return result;
+    }
+  }
+
+  public static class RulesBasedAssignStrategy implements AssignStrategy {
+    public List<Rule> rules;
+    public List snitches;
+    public ClusterState clusterState;
+
+    public RulesBasedAssignStrategy(List<Rule> rules, List snitches, ClusterState clusterState) {
+      this.rules = rules;
+      this.snitches = snitches;
+      this.clusterState = clusterState;
+    }
+
+    @Override
+    public List<ReplicaPosition> assign(SolrCloudManager solrCloudManager, AssignRequest assignRequest) throws Assign.AssignmentException, IOException, InterruptedException {
+      if (assignRequest.numTlogReplicas + assignRequest.numPullReplicas != 0) {
+        throw new Assign.AssignmentException(
+            Replica.Type.TLOG + " or " + Replica.Type.PULL + " replica types not supported with placement rules or cluster policies");
+      }
+
+      Map<String, Integer> shardVsReplicaCount = new HashMap<>();
+      for (String shard : assignRequest.shardNames) shardVsReplicaCount.put(shard, assignRequest.numNrtReplicas);
+
+      Map<String, Map<String, Integer>> shardVsNodes = new LinkedHashMap<>();
+      DocCollection docCollection = solrCloudManager.getClusterStateProvider().getClusterState().getCollectionOrNull(assignRequest.collectionName);
+      if (docCollection != null) {
+        for (Slice slice : docCollection.getSlices()) {
+          LinkedHashMap<String, Integer> n = new LinkedHashMap<>();
+          shardVsNodes.put(slice.getName(), n);
+          for (Replica replica : slice.getReplicas()) {
+            Integer count = n.get(replica.getNodeName());
+            if (count == null) count = 0;
+            n.put(replica.getNodeName(), ++count);
+          }
+        }
+      }
+
+      List<String> nodesList = assignRequest.nodes == null ? new ArrayList<>(clusterState.getLiveNodes()) : assignRequest.nodes;
+
+      ReplicaAssigner replicaAssigner = new ReplicaAssigner(rules,
+          shardVsReplicaCount,
+          snitches,
+          shardVsNodes,
+          nodesList,
+          solrCloudManager, clusterState);
+
+      Map<ReplicaPosition, String> nodeMappings = replicaAssigner.getNodeMappings();
+      return nodeMappings.entrySet().stream()
+          .map(e -> new ReplicaPosition(e.getKey().shard, e.getKey().index, e.getKey().type, e.getValue()))
+          .collect(Collectors.toList());
+    }
+  }
+
+  public static class PolicyBasedAssignStrategy implements AssignStrategy {
+    public String policyName;
+
+    public PolicyBasedAssignStrategy(String policyName) {
+      this.policyName = policyName;
+    }
+
+    @Override
+    public List<ReplicaPosition> assign(SolrCloudManager solrCloudManager, AssignRequest assignRequest) throws Assign.AssignmentException, IOException, InterruptedException {
+      return Assign.getPositionsUsingPolicy(assignRequest.collectionName,
+          assignRequest.shardNames, assignRequest.numNrtReplicas,
+          assignRequest.numTlogReplicas, assignRequest.numPullReplicas,
+          policyName, solrCloudManager, assignRequest.nodes);
+    }
+  }
+
+  public static class AssignStrategyFactory {
+    public SolrCloudManager solrCloudManager;
+
+    public AssignStrategyFactory(SolrCloudManager solrCloudManager) {
+      this.solrCloudManager = solrCloudManager;
+    }
+
+    public AssignStrategy create(ClusterState clusterState, DocCollection collection) throws IOException, InterruptedException {
+      List<Map> ruleMaps = (List<Map>) collection.get("rule");
+      String policyName = collection.getStr(POLICY);
+      List snitches = (List) collection.get(SNITCH);
+      AutoScalingConfig autoScalingConfig = solrCloudManager.getDistribStateManager().getAutoScalingConfig();
+
+      StrategyType strategyType = null;
+      if ((ruleMaps == null || ruleMaps.isEmpty()) && policyName == null && autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
+        strategyType = StrategyType.LEGACY;
+      } else if (ruleMaps != null && !ruleMaps.isEmpty()) {
+        strategyType = StrategyType.RULES;
+      } else {
+        strategyType = StrategyType.POLICY;
+      }
+
+      switch (strategyType) {
+        case LEGACY:
+          return new LegacyAssignStrategy();
+        case RULES:
+          List<Rule> rules = new ArrayList<>();
+          for (Object map : ruleMaps) rules.add(new Rule((Map) map));
+          return new RulesBasedAssignStrategy(rules, snitches, clusterState);
+        case POLICY:
+          return new PolicyBasedAssignStrategy(policyName);
+        default:
+          throw new Assign.AssignmentException("Unknown strategy type: " + strategyType);
+      }
+    }
+
+    private enum StrategyType {
+      LEGACY, RULES, POLICY;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
index 4f66ff9..542345d 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
@@ -42,6 +42,7 @@ import org.apache.solr.client.solrj.cloud.autoscaling.NotEmptyException;
 import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
 import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
 import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
+import org.apache.solr.cloud.CloudUtil;
 import org.apache.solr.cloud.Overseer;
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
@@ -131,7 +132,6 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
 
     ocmh.validateConfigOrThrowSolrException(configName);
 
-    List<String> nodeList = new ArrayList<>();
     String router = message.getStr("router.name", DocRouter.DEFAULT_NAME);
     String policy = message.getStr(Policy.POLICY);
     AutoScalingConfig autoScalingConfig = ocmh.cloudManager.getDistribStateManager().getAutoScalingConfig();
@@ -177,10 +177,12 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not fully create collection: " + collectionName);
       }
 
+      // refresh cluster state
+      clusterState = ocmh.cloudManager.getClusterStateProvider().getClusterState();
+
       List<ReplicaPosition> replicaPositions = null;
       try {
-        replicaPositions = buildReplicaPositions(ocmh.cloudManager, clusterState, message,
-            nodeList, shardNames, sessionWrapper);
+        replicaPositions = buildReplicaPositions(ocmh.cloudManager, clusterState, clusterState.getCollection(collectionName), message, shardNames, sessionWrapper);
       } catch (Assign.AssignmentException e) {
         ZkNodeProps deleteMessage = new ZkNodeProps("name", collectionName);
         new DeleteCollectionCmd(ocmh).call(clusterState, deleteMessage, results);
@@ -188,7 +190,7 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
         throw new SolrException(ErrorCode.SERVER_ERROR, e.getMessage(), e.getCause());
       }
 
-      if (nodeList.isEmpty()) {
+      if (replicaPositions.isEmpty()) {
         log.debug("Finished create command for collection: {}", collectionName);
         return;
       }
@@ -333,8 +335,9 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
   }
 
   public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
+                                                            DocCollection docCollection,
                                                             ZkNodeProps message,
-                                                            List<String> nodeList, List<String> shardNames,
+                                                            List<String> shardNames,
                                                             AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException, Assign.AssignmentException {
     final String collectionName = message.getStr(NAME);
     // look at the replication factor and see if it matches reality
@@ -342,11 +345,9 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
     int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
     int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas>0?0:1));
     int numPullReplicas = message.getInt(PULL_REPLICAS, 0);
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    String policy = message.getStr(Policy.POLICY);
-    boolean usePolicyFramework = !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty() || policy != null;
+    boolean usePolicyFramework = CloudUtil.usePolicyFramework(docCollection, cloudManager);
 
-    Integer numSlices = shardNames.size();
+    int numSlices = shardNames.size();
     int maxShardsPerNode = checkMaxShardsPerNode(message, usePolicyFramework);
 
     // we need to look at every node and see how many cores it serves
@@ -354,7 +355,7 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
     // but (for now) require that each core goes on a distinct node.
 
     List<ReplicaPosition> replicaPositions;
-    nodeList.addAll(Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM));
+    List<String> nodeList = Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM);
     if (nodeList.isEmpty()) {
       log.warn("It is unusual to create a collection ("+collectionName+") without cores.");
 
@@ -387,8 +388,17 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
             + ". This requires " + requestedShardsToCreate
             + " shards to be created (higher than the allowed number)");
       }
-      replicaPositions = Assign.identifyNodes(cloudManager
-          , clusterState, nodeList, collectionName, message, shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas);
+      Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
+          .forCollection(collectionName)
+          .forShard(shardNames)
+          .assignNrtReplicas(numNrtReplicas)
+          .assignTlogReplicas(numTlogReplicas)
+          .assignPullReplicas(numPullReplicas)
+          .onNodes(nodeList)
+          .build();
+      Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(cloudManager);
+      Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, docCollection);
+      replicaPositions = assignStrategy.assign(cloudManager, assignRequest);
       sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
     }
     return replicaPositions;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
index a09eec3..c622f0f 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
@@ -104,20 +104,25 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
     try {
       for (ZkNodeProps sourceReplica : sourceReplicas) {
         NamedList nl = new NamedList();
-        log.info("Going to create replica for collection={} shard={} on node={}", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
+        String sourceCollection = sourceReplica.getStr(COLLECTION_PROP);
+        log.info("Going to create replica for collection={} shard={} on node={}", sourceCollection, sourceReplica.getStr(SHARD_ID_PROP), target);
         String targetNode = target;
         if (targetNode == null) {
           Replica.Type replicaType = Replica.Type.get(sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
-          targetNode = Assign.identifyNodes(ocmh.cloudManager,
-              clusterState,
-              new ArrayList<>(ocmh.cloudManager.getClusterStateProvider().getLiveNodes()),
-              sourceReplica.getStr(COLLECTION_PROP),
-              message,
-              Collections.singletonList(sourceReplica.getStr(SHARD_ID_PROP)),
-              replicaType == Replica.Type.NRT ? 1: 0,
-              replicaType == Replica.Type.TLOG ? 1 : 0,
-              replicaType == Replica.Type.PULL ? 1 : 0
-          ).get(0).node;
+          int numNrtReplicas = replicaType == Replica.Type.NRT ? 1 : 0;
+          int numTlogReplicas = replicaType == Replica.Type.TLOG ? 1 : 0;
+          int numPullReplicas = replicaType == Replica.Type.PULL ? 1 : 0;
+          Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
+              .forCollection(sourceCollection)
+              .forShard(Collections.singletonList(sourceReplica.getStr(SHARD_ID_PROP)))
+              .assignNrtReplicas(numNrtReplicas)
+              .assignTlogReplicas(numTlogReplicas)
+              .assignPullReplicas(numPullReplicas)
+              .onNodes(new ArrayList<>(ocmh.cloudManager.getClusterStateProvider().getLiveNodes()))
+              .build();
+          Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(ocmh.cloudManager);
+          Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, clusterState.getCollection(sourceCollection));
+          targetNode = assignStrategy.assign(ocmh.cloudManager, assignRequest).get(0).node;
           sessionWrapperRef.set(PolicyHelper.getLastSessionWrapper(true));
         }
         ZkNodeProps msg = sourceReplica.plus("parallel", String.valueOf(parallel)).plus(CoreAdminParams.NODE, targetNode);
@@ -127,7 +132,7 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
               countDownLatch.countDown();
               if (nl.get("failure") != null) {
                 String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-                    " on node=%s", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
+                    " on node=%s", sourceCollection, sourceReplica.getStr(SHARD_ID_PROP), target);
                 log.warn(errorString);
                 // one replica creation failed. Make the best attempt to
                 // delete all the replicas created so far in the target
@@ -138,7 +143,7 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
                 }
               } else {
                 log.debug("Successfully created replica for collection={} shard={} on node={}",
-                    sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
+                    sourceCollection, sourceReplica.getStr(SHARD_ID_PROP), target);
               }
             }).get(0);
 
@@ -147,7 +152,7 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
           if (sourceReplica.getBool(ZkStateReader.LEADER_PROP, false) || waitForFinalState) {
             String shardName = sourceReplica.getStr(SHARD_ID_PROP);
             String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
-            String collectionName = sourceReplica.getStr(COLLECTION_PROP);
+            String collectionName = sourceCollection;
             String key = collectionName + "_" + replicaName;
             CollectionStateWatcher watcher;
             if (waitForFinalState) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
index d082ac3..d100ce0 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
@@ -232,11 +232,17 @@ public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
     PolicyHelper.SessionWrapper sessionWrapper = null;
 
     try {
-      List<ReplicaPosition> replicaPositions = Assign.identifyNodes(
-          ocmh.cloudManager, clusterState,
-          nodeList, restoreCollectionName,
-          message, sliceNames,
-          numNrtReplicas, numTlogReplicas, numPullReplicas);
+      Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
+          .forCollection(restoreCollectionName)
+          .forShard(sliceNames)
+          .assignNrtReplicas(numNrtReplicas)
+          .assignTlogReplicas(numTlogReplicas)
+          .assignPullReplicas(numPullReplicas)
+          .onNodes(nodeList)
+          .build();
+      Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(ocmh.cloudManager);
+      Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, restoreCollection);
+      List<ReplicaPosition> replicaPositions = assignStrategy.assign(ocmh.cloudManager, assignRequest);
       sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
       //Create one replica per shard and copy backed up data to it
       for (Slice slice : restoreCollection.getSlices()) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
index 00488a3..bac45ab 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -361,12 +361,17 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
       }
 
       t = timings.sub("identifyNodesForReplicas");
-      List<ReplicaPosition> replicaPositions = Assign.identifyNodes(ocmh.cloudManager,
-          clusterState,
-          new ArrayList<>(clusterState.getLiveNodes()),
-          collectionName,
-          new ZkNodeProps(collection.getProperties()),
-          subSlices, numNrt.get(), numTlog.get(), numPull.get());
+      Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
+          .forCollection(collectionName)
+          .forShard(subSlices)
+          .assignNrtReplicas(numNrt.get())
+          .assignTlogReplicas(numTlog.get())
+          .assignPullReplicas(numPull.get())
+          .onNodes(new ArrayList<>(clusterState.getLiveNodes()))
+          .build();
+      Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(ocmh.cloudManager);
+      Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, collection);
+      List<ReplicaPosition> replicaPositions = assignStrategy.assign(ocmh.cloudManager, assignRequest);
       sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
       t.stop();
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
index 34843c1..6cbdbfb 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
@@ -282,9 +282,7 @@ public class ReplicaMutator {
 
     Slice slice = collection != null ?  collection.getSlice(sliceName) : null;
 
-    Map<String, Object> replicaProps = new LinkedHashMap<>();
-
-    replicaProps.putAll(message.getProperties());
+    Map<String, Object> replicaProps = new LinkedHashMap<>(message.getProperties());
     if (slice != null) {
       Replica oldReplica = slice.getReplica(coreNodeName);
       if (oldReplica != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
index 09a119b..1ac0cae 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
@@ -79,7 +79,7 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
     });
 
     assertTrue("Should have gotten the right error message back",
-          e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+          e.getMessage().contains("given the current number of eligible live nodes"));
 
 
     // Oddly, we should succeed next just because setting property.name will not check for nodes being "full up"
@@ -106,7 +106,7 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
     });
 
     assertTrue("Should have gotten the right error message back",
-        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+        e2.getMessage().contains("given the current number of eligible live nodes"));
 
     // wait for recoveries to finish, for a clean shutdown - see SOLR-9645
     waitForState("Expected to see all replicas active", collectionName, (n, c) -> {
@@ -141,7 +141,7 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
           .process(cluster.getSolrClient());
     });
     assertTrue("Should have gotten the right error message back",
-        e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+        e.getMessage().contains("given the current number of eligible live nodes"));
 
     // Hmmm, providing a nodeset also overrides the checks for max replicas, so prove it.
     List<String> nodes = getAllNodeNames(collectionName);
@@ -156,7 +156,7 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
           .process(cluster.getSolrClient());
     });
     assertTrue("Should have gotten the right error message back",
-        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+        e2.getMessage().contains("given the current number of eligible live nodes"));
 
     // And finally, ensure that there are all the replicas we expect. We should have shards 1, 2 and 4 and each
     // should have exactly two replicas

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java
index d1dcecf..bfd5878 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/TestPolicyCloud.java
@@ -325,6 +325,7 @@ public class TestPolicyCloud extends SolrCloudTestCase {
         Utils.getObjectByPath(json, true, "cluster-policy[2]/port"));
 
     CollectionAdminRequest.createCollectionWithImplicitRouter("policiesTest", "conf", "s1", 1, 1, 1)
+        .setMaxShardsPerNode(-1)
         .process(cluster.getSolrClient());
 
     DocCollection coll = getCollectionState("policiesTest");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index 4b73200..08ce6bf 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -741,7 +741,6 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       results.add(CoreAdminParams.REQUESTID, props.getStr(CommonAdminParams.ASYNC));
     }
     boolean waitForFinalState = props.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
-    List<String> nodeList = new ArrayList<>();
     final String collectionName = props.getStr(NAME);
 
     String router = props.getStr("router.name", DocRouter.DEFAULT_NAME);
@@ -784,8 +783,8 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     opDelay(collectionName, CollectionParams.CollectionAction.CREATE.name());
 
     AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-    List<ReplicaPosition> replicaPositions = CreateCollectionCmd.buildReplicaPositions(cloudManager, getClusterState(), props,
-        nodeList, shardNames, sessionWrapper);
+    List<ReplicaPosition> replicaPositions = CreateCollectionCmd.buildReplicaPositions(cloudManager, getClusterState(), cmd.collection, props,
+        shardNames, sessionWrapper);
     if (sessionWrapper.get() != null) {
       sessionWrapper.get().release();
     }
@@ -1102,13 +1101,18 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     SplitShardCmd.fillRanges(cloudManager, message, collection, parentSlice, subRanges, subSlices, subShardNames, true);
     // add replicas for new subShards
     int repFactor = parentSlice.getReplicas().size();
-    List<ReplicaPosition> replicaPositions = Assign.identifyNodes(cloudManager,
-        clusterState,
-        new ArrayList<>(clusterState.getLiveNodes()),
-        collectionName,
-        new ZkNodeProps(collection.getProperties()),
-        // reproduce the bug
-        subSlices, repFactor, 0, 0);
+    Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
+        .forCollection(collectionName)
+        .forShard(subSlices)
+        .assignNrtReplicas(repFactor)
+        .assignTlogReplicas(0)
+        .assignPullReplicas(0)
+        .onNodes(new ArrayList<>(clusterState.getLiveNodes()))
+        .build();
+    Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(cloudManager);
+    Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, collection);
+    // reproduce the bug
+    List<ReplicaPosition> replicaPositions = assignStrategy.assign(cloudManager, assignRequest);
     PolicyHelper.SessionWrapper sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
     if (sessionWrapper != null) sessionWrapper.release();
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
index 3637428..ffdfff7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
@@ -257,6 +257,7 @@ public class TestSimPolicyCloud extends SimSolrCloudTestCase {
         Utils.getObjectByPath(json, true, "cluster-policy[2]/port"));
 
     CollectionAdminRequest.createCollectionWithImplicitRouter("policiesTest", "conf", "s1", 1, 1, 1)
+        .setMaxShardsPerNode(-1)
         .process(solrClient);
     CloudTestUtils.waitForState(cluster, "Timeout waiting for collection to become active", "policiesTest",
         CloudTestUtils.clusterShape(1, 3, false, true));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c587410f/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java
index 591a001..62d8761 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java
@@ -40,7 +40,7 @@ public class ReplicaPosition implements Comparable<ReplicaPosition> {
   public int compareTo(ReplicaPosition that) {
     //this is to ensure that we try one replica from each shard first instead of
     // all replicas from same shard
-    return that.index > index ? -1 : that.index == index ? 0 : 1;
+    return Integer.compare(index, that.index);
   }
 
   @Override


[16/29] lucene-solr:jira/http2: LUCENE-8505: IndexWriter#addIndices will now fail if the target index is sorted but the candidate is not.

Posted by da...@apache.org.
LUCENE-8505: IndexWriter#addIndices will now fail if the target index is sorted but the candidate is not.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2bad3c49
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2bad3c49
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2bad3c49

Branch: refs/heads/jira/http2
Commit: 2bad3c498517120c8aaaf805fd49a5cb459417c0
Parents: e437b2f
Author: Jim Ferenczi <ji...@apache.org>
Authored: Tue Sep 25 09:14:07 2018 +0200
Committer: Jim Ferenczi <ji...@apache.org>
Committed: Tue Sep 25 09:14:07 2018 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  3 ++
 .../org/apache/lucene/index/IndexWriter.java    | 27 +++++++----
 .../org/apache/lucene/index/MergeState.java     | 44 ++----------------
 .../apache/lucene/index/TestIndexSorting.java   | 49 ++++++++++++++++++--
 4 files changed, 70 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2bad3c49/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index d305759..60afb58 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -117,6 +117,9 @@ Changes in Runtime Behavior
   total hit counts accurately up to 1,000 in order to enable top-hits
   optimizations such as block-max WAND (LUCENE-8135). (Adrien Grand)
 
+* LUCENE-8505: IndexWriter#addIndices will now fail if the target index is sorted but
+  the candidate is not. (Jim Ferenczi)
+
 New Features
 
 * LUCENE-8340: LongPoint#newDistanceQuery may be used to boost scores based on

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2bad3c49/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 5affd85..db6ef9f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -53,6 +53,7 @@ import org.apache.lucene.search.DocValuesFieldExistsQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FlushInfo;
@@ -934,21 +935,30 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
   }
 
   /** Confirms that the incoming index sort (if any) matches the existing index sort (if any).  */
-  private void validateIndexSort() throws CorruptIndexException {
+  private void validateIndexSort() {
     Sort indexSort = config.getIndexSort();
     if (indexSort != null) {
       for(SegmentCommitInfo info : segmentInfos) {
         Sort segmentIndexSort = info.info.getIndexSort();
-        if (segmentIndexSort != null && indexSort.equals(segmentIndexSort) == false) {
+        if (segmentIndexSort == null || isCongruentSort(indexSort, segmentIndexSort) == false) {
           throw new IllegalArgumentException("cannot change previous indexSort=" + segmentIndexSort + " (from segment=" + info + ") to new indexSort=" + indexSort);
-        } else if (segmentIndexSort == null) {
-          // Flushed segments are not sorted if they were built with a version prior to 6.5.0
-          throw new CorruptIndexException("segment not sorted with indexSort=" + segmentIndexSort, info.info.toString());
         }
       }
     }
   }
 
+  /**
+   * Returns true if <code>indexSort</code> is a prefix of <code>otherSort</code>.
+   **/
+  static boolean isCongruentSort(Sort indexSort, Sort otherSort) {
+    final SortField[] fields1 = indexSort.getSort();
+    final SortField[] fields2 = otherSort.getSort();
+    if (fields1.length > fields2.length) {
+      return false;
+    }
+    return Arrays.asList(fields1).equals(Arrays.asList(fields2).subList(0, fields1.length));
+  }
+
   // reads latest field infos for the commit
   // this is used on IW init and addIndexes(Dir) to create/update the global field map.
   // TODO: fix tests abusing this method!
@@ -2824,8 +2834,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
 
             Sort segmentIndexSort = info.info.getIndexSort();
 
-            if (indexSort != null && segmentIndexSort != null && indexSort.equals(segmentIndexSort) == false) {
-              // TODO: we could make this smarter, e.g. if the incoming indexSort is congruent with our sort ("starts with") then it's OK
+            if (indexSort != null && (segmentIndexSort == null || isCongruentSort(indexSort, segmentIndexSort) == false)) {
               throw new IllegalArgumentException("cannot change index sort from " + segmentIndexSort + " to " + indexSort);
             }
 
@@ -2908,8 +2917,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
     }
 
     Sort leafIndexSort = segmentMeta.getSort();
-    if (config.getIndexSort() != null && leafIndexSort != null
-        && config.getIndexSort().equals(leafIndexSort) == false) {
+    if (config.getIndexSort() != null &&
+          (leafIndexSort == null || isCongruentSort(config.getIndexSort(), leafIndexSort) == false)) {
       throw new IllegalArgumentException("cannot change index sort from " + leafIndexSort + " to " + config.getIndexSort());
     }
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2bad3c49/lucene/core/src/java/org/apache/lucene/index/MergeState.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/MergeState.java b/lucene/core/src/java/org/apache/lucene/index/MergeState.java
index 9ad69f6..0e47a5a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MergeState.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MergeState.java
@@ -34,6 +34,8 @@ import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.packed.PackedInts;
 import org.apache.lucene.util.packed.PackedLongValues;
 
+import static org.apache.lucene.index.IndexWriter.isCongruentSort;
+
 /** Holds common state used during segment merging.
  *
  * @lucene.experimental */
@@ -223,50 +225,14 @@ public class MergeState {
       return originalReaders;
     }
 
-    /** If an incoming reader is not sorted, because it was flushed by IW older than {@link Version.LUCENE_7_0_0}
-     * or because we add unsorted segments from another index {@link IndexWriter#addIndexes(CodecReader...)} ,
-     * we sort it here:
-     */
-    final Sorter sorter = new Sorter(indexSort);
     List<CodecReader> readers = new ArrayList<>(originalReaders.size());
 
     for (CodecReader leaf : originalReaders) {
       Sort segmentSort = leaf.getMetaData().getSort();
-
-      if (segmentSort == null) {
-        // This segment was written by flush, so documents are not yet sorted, so we sort them now:
-        long t0 = System.nanoTime();
-        Sorter.DocMap sortDocMap = sorter.sort(leaf);
-        long t1 = System.nanoTime();
-        double msec = (t1-t0)/1000000.0;
-        
-        if (sortDocMap != null) {
-          if (infoStream.isEnabled("SM")) {
-            infoStream.message("SM", String.format(Locale.ROOT, "segment %s is not sorted; wrapping for sort %s now (%.2f msec to sort)", leaf, indexSort, msec));
-          }
-          needsIndexSort = true;
-          leaf = SlowCodecReaderWrapper.wrap(SortingLeafReader.wrap(new MergeReaderWrapper(leaf), sortDocMap));
-          leafDocMaps[readers.size()] = new DocMap() {
-              @Override
-              public int get(int docID) {
-                return sortDocMap.oldToNew(docID);
-              }
-            };
-        } else {
-          if (infoStream.isEnabled("SM")) {
-            infoStream.message("SM", String.format(Locale.ROOT, "segment %s is not sorted, but is already accidentally in sort %s order (%.2f msec to sort)", leaf, indexSort, msec));
-          }
-        }
-
-      } else {
-        if (segmentSort.equals(indexSort) == false) {
-          throw new IllegalArgumentException("index sort mismatch: merged segment has sort=" + indexSort + " but to-be-merged segment has sort=" + segmentSort);
-        }
-        if (infoStream.isEnabled("SM")) {
-          infoStream.message("SM", "segment " + leaf + " already sorted");
-        }
+      if (segmentSort == null || isCongruentSort(indexSort, segmentSort) == false) {
+        throw new IllegalArgumentException("index sort mismatch: merged segment has sort=" + indexSort +
+            " but to-be-merged segment has sort=" + (segmentSort == null ? "null" : segmentSort));
       }
-
       readers.add(leaf);
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2bad3c49/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java
index 3e62aad..3857a97 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java
@@ -19,6 +19,7 @@ package org.apache.lucene.index;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -1820,19 +1821,52 @@ public class TestIndexSorting extends LuceneTestCase {
     dir.close();
   }
 
-  public void testAddIndexes(boolean withDeletes, boolean useReaders) throws Exception {
+  public void testBadAddIndexes() throws Exception {
     Directory dir = newDirectory();
     Sort indexSort = new Sort(new SortField("foo", SortField.Type.LONG));
     IndexWriterConfig iwc1 = newIndexWriterConfig();
-    if (random().nextBoolean()) {
-      iwc1.setIndexSort(indexSort);
+    iwc1.setIndexSort(indexSort);
+    IndexWriter w = new IndexWriter(dir, iwc1);
+    w.addDocument(new Document());
+    List<Sort> indexSorts = Arrays.asList(null, new Sort(new SortField("bar", SortField.Type.LONG)));
+    for (Sort sort : indexSorts) {
+      Directory dir2 = newDirectory();
+      IndexWriterConfig iwc2 = newIndexWriterConfig();
+      if (sort != null) {
+        iwc2.setIndexSort(sort);
+      }
+      IndexWriter w2 = new IndexWriter(dir2, iwc2);
+      w2.addDocument(new Document());
+      final IndexReader reader = w2.getReader();
+      w2.close();
+      IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> w.addIndexes(dir2));
+      assertThat(expected.getMessage(), containsString("cannot change index sort"));
+      CodecReader[] codecReaders = new CodecReader[reader.leaves().size()];
+      for (int i = 0; i < codecReaders.length; ++i) {
+        codecReaders[i] = (CodecReader) reader.leaves().get(i).reader();
+      }
+      expected = expectThrows(IllegalArgumentException.class, () -> w.addIndexes(codecReaders));
+      assertThat(expected.getMessage(), containsString("cannot change index sort"));
+
+      reader.close();
+      dir2.close();
     }
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    w.close();
+    dir.close();
+  }
+
+  public void testAddIndexes(boolean withDeletes, boolean useReaders) throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc1 = newIndexWriterConfig();
+    Sort indexSort = new Sort(new SortField("foo", SortField.Type.LONG), new SortField("bar", SortField.Type.LONG));
+    iwc1.setIndexSort(indexSort);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc1);
     final int numDocs = atLeast(100);
     for (int i = 0; i < numDocs; ++i) {
       Document doc = new Document();
       doc.add(new StringField("id", Integer.toString(i), Store.NO));
       doc.add(new NumericDocValuesField("foo", random().nextInt(20)));
+      doc.add(new NumericDocValuesField("bar", random().nextInt(20)));
       w.addDocument(doc);
     }
     if (withDeletes) {
@@ -1848,7 +1882,12 @@ public class TestIndexSorting extends LuceneTestCase {
 
     Directory dir2 = newDirectory();
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
-    iwc.setIndexSort(indexSort);
+    if (indexSort != null && random().nextBoolean()) {
+      // test congruent index sort
+      iwc.setIndexSort(new Sort(new SortField("foo", SortField.Type.LONG)));
+    } else {
+      iwc.setIndexSort(indexSort);
+    }
     IndexWriter w2 = new IndexWriter(dir2, iwc);
 
     if (useReaders) {


[19/29] lucene-solr:jira/http2: Fix javadoc typo in TermInSetQuery.

Posted by da...@apache.org.
Fix javadoc typo in TermInSetQuery.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/05f935f6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/05f935f6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/05f935f6

Branch: refs/heads/jira/http2
Commit: 05f935f6e480ec314e4568b9349810fcd53cc74f
Parents: e16d7d6
Author: Christine Poerschke <cp...@apache.org>
Authored: Tue Sep 25 17:55:42 2018 +0100
Committer: Christine Poerschke <cp...@apache.org>
Committed: Tue Sep 25 17:56:38 2018 +0100

----------------------------------------------------------------------
 lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/05f935f6/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
index 896f20f..9181668 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
@@ -49,7 +49,7 @@ import org.apache.lucene.util.RamUsageEstimator;
  * Specialization for a disjunction over many terms that behaves like a
  * {@link ConstantScoreQuery} over a {@link BooleanQuery} containing only
  * {@link org.apache.lucene.search.BooleanClause.Occur#SHOULD} clauses.
- * <p>For instance in the following example, both @{code q1} and {@code q2}
+ * <p>For instance in the following example, both {@code q1} and {@code q2}
  * would yield the same scores:
  * <pre class="prettyprint">
  * Query q1 = new TermInSetQuery(new Term("field", "foo"), new Term("field", "bar"));


[02/29] lucene-solr:jira/http2: SOLR-12792: extract test data into separate files in autoscaling tests

Posted by da...@apache.org.
SOLR-12792: extract test data into separate files in autoscaling tests


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1d604d1b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1d604d1b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1d604d1b

Branch: refs/heads/jira/http2
Commit: 1d604d1b3ffe1560700e5e462e9e796646a30d7a
Parents: 6adeb5b
Author: Noble Paul <no...@apache.org>
Authored: Fri Sep 21 13:26:52 2018 +1000
Committer: Noble Paul <no...@apache.org>
Committed: Fri Sep 21 13:26:52 2018 +1000

----------------------------------------------------------------------
 .../testAutoScalingHandlerFailure.json          | 141 +++++++++++++++
 .../solrj/cloud/autoscaling/TestPolicy2.java    | 178 +------------------
 2 files changed, 143 insertions(+), 176 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1d604d1b/solr/solrj/src/test-files/solrj/solr/autoscaling/testAutoScalingHandlerFailure.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testAutoScalingHandlerFailure.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testAutoScalingHandlerFailure.json
new file mode 100644
index 0000000..97368c2
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testAutoScalingHandlerFailure.json
@@ -0,0 +1,141 @@
+{"diagnostics":{
+  "sortedNodes":[{
+    "node":"127.0.0.1:63191_solr",
+    "isLive":true,
+    "cores":3.0,
+    "freedisk":680.908073425293,
+    "heapUsage":24.97510064011647,
+    "sysLoadAvg":272.75390625,
+    "totaldisk":1037.938980102539,
+    "replicas":{"readApiTestViolations":{"shard1":[{"core_node5":{
+      "core":"readApiTestViolations_shard1_replica_n2",
+      "leader":"true",
+      "base_url":"https://127.0.0.1:63191/solr",
+      "node_name":"127.0.0.1:63191_solr",
+      "state":"active",
+      "type":"NRT",
+      "force_set_state":"false",
+      "INDEX.sizeInGB":6.426125764846802E-8,
+      "shard":"shard1",
+      "collection":"readApiTestViolations"}},
+      {"core_node7":{
+        "core":"readApiTestViolations_shard1_replica_n4",
+        "base_url":"https://127.0.0.1:63191/solr",
+        "node_name":"127.0.0.1:63191_solr",
+        "state":"active",
+        "type":"NRT",
+        "force_set_state":"false",
+        "INDEX.sizeInGB":6.426125764846802E-8,
+        "shard":"shard1",
+        "collection":"readApiTestViolations"}},
+      {"core_node12":{
+        "core":"readApiTestViolations_shard1_replica_n10",
+        "base_url":"https://127.0.0.1:63191/solr",
+        "node_name":"127.0.0.1:63191_solr",
+        "state":"active",
+        "type":"NRT",
+        "force_set_state":"false",
+        "INDEX.sizeInGB":6.426125764846802E-8,
+        "shard":"shard1",
+        "collection":"readApiTestViolations"}}]}}},
+    {
+      "node":"127.0.0.1:63192_solr",
+      "isLive":true,
+      "cores":3.0,
+      "freedisk":680.908073425293,
+      "heapUsage":24.98878807983566,
+      "sysLoadAvg":272.75390625,
+      "totaldisk":1037.938980102539,
+      "replicas":{"readApiTestViolations":{"shard1":[{"core_node3":{
+        "core":"readApiTestViolations_shard1_replica_n1",
+        "base_url":"https://127.0.0.1:63192/solr",
+        "node_name":"127.0.0.1:63192_solr",
+        "state":"active",
+        "type":"NRT",
+        "force_set_state":"false",
+        "INDEX.sizeInGB":6.426125764846802E-8,
+        "shard":"shard1",
+        "collection":"readApiTestViolations"}},
+        {"core_node9":{
+          "core":"readApiTestViolations_shard1_replica_n6",
+          "base_url":"https://127.0.0.1:63192/solr",
+          "node_name":"127.0.0.1:63192_solr",
+          "state":"active",
+          "type":"NRT",
+          "force_set_state":"false",
+          "INDEX.sizeInGB":6.426125764846802E-8,
+          "shard":"shard1",
+          "collection":"readApiTestViolations"}},
+        {"core_node11":{
+          "core":"readApiTestViolations_shard1_replica_n8",
+          "base_url":"https://127.0.0.1:63192/solr",
+          "node_name":"127.0.0.1:63192_solr",
+          "state":"active",
+          "type":"NRT",
+          "force_set_state":"false",
+          "INDEX.sizeInGB":6.426125764846802E-8,
+          "shard":"shard1",
+          "collection":"readApiTestViolations"}}]}}},
+    {
+      "node":"127.0.0.1:63219_solr",
+      "isLive":true,
+      "cores":0.0,
+      "freedisk":680.908073425293,
+      "heapUsage":24.98878807983566,
+      "sysLoadAvg":272.75390625,
+      "totaldisk":1037.938980102539,
+      "replicas":{}}],
+  "liveNodes":["127.0.0.1:63191_solr",
+    "127.0.0.1:63192_solr",
+    "127.0.0.1:63219_solr"],
+  "violations":[{
+    "collection":"readApiTestViolations",
+    "shard":"shard1",
+    "node":"127.0.0.1:63191_solr",
+    "violation":{
+      "replica":{
+        "NRT":3,
+        "count":3},
+      "delta":2.0},
+    "clause":{
+      "replica":"<3",
+      "shard":"#EACH",
+      "node":"#ANY",
+      "collection":"readApiTestViolations"}},
+    {
+      "collection":"readApiTestViolations",
+      "shard":"shard1",
+      "node":"127.0.0.1:63192_solr",
+      "violation":{
+        "replica":{
+          "NRT":3,
+          "count":3},
+        "delta":2.0},
+      "clause":{
+        "replica":"<2",
+        "shard":"#EACH",
+        "node":"#ANY",
+        "collection":"readApiTestViolations"}}],
+  "config":{
+    "cluster-preferences":[{
+      "minimize":"cores",
+      "precision":3},
+      {
+        "maximize":"freedisk",
+        "precision":100},
+      {
+        "minimize":"sysLoadAvg",
+        "precision":10},
+      {
+        "minimize":"heapUsage",
+        "precision":10}],
+    "cluster-policy":[{
+      "cores":"<10",
+      "node":"#ANY"},
+      {
+        "replica":"<2",
+        "shard":"#EACH",
+        "node":"#ANY"},
+      {
+        "nodeRole":"overseer",
+        "replica":0}]}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1d604d1b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
index b274974..71c0287 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
@@ -291,182 +291,8 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
     };
   }
 
-  public void testAutoScalingHandlerFailure() {
-    String diagnostics = "{" +
-        "  'diagnostics': {" +
-        "    'sortedNodes': [" +
-        "      {" +
-        "        'node': '127.0.0.1:63191_solr'," +
-        "        'isLive': true," +
-        "        'cores': 3.0," +
-        "        'freedisk': 680.908073425293," +
-        "        'heapUsage': 24.97510064011647," +
-        "        'sysLoadAvg': 272.75390625," +
-        "        'totaldisk': 1037.938980102539," +
-        "        'replicas': {" +
-        "          'readApiTestViolations': {" +
-        "            'shard1': [" +
-        "              {" +
-        "                'core_node5': {" +
-        "                  'core': 'readApiTestViolations_shard1_replica_n2'," +
-        "                  'leader': 'true'," +
-        "                  'base_url': 'https://127.0.0.1:63191/solr'," +
-        "                  'node_name': '127.0.0.1:63191_solr'," +
-        "                  'state': 'active'," +
-        "                  'type': 'NRT'," +
-        "                  'force_set_state': 'false'," +
-        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
-        "                  'shard': 'shard1'," +
-        "                  'collection': 'readApiTestViolations'" +
-        "                }" +
-        "              }," +
-        "              {" +
-        "                'core_node7': {" +
-        "                  'core': 'readApiTestViolations_shard1_replica_n4'," +
-        "                  'base_url': 'https://127.0.0.1:63191/solr'," +
-        "                  'node_name': '127.0.0.1:63191_solr'," +
-        "                  'state': 'active'," +
-        "                  'type': 'NRT'," +
-        "                  'force_set_state': 'false'," +
-        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
-        "                  'shard': 'shard1'," +
-        "                  'collection': 'readApiTestViolations'" +
-        "                }" +
-        "              }," +
-        "              {" +
-        "                'core_node12': {" +
-        "                  'core': 'readApiTestViolations_shard1_replica_n10'," +
-        "                  'base_url': 'https://127.0.0.1:63191/solr'," +
-        "                  'node_name': '127.0.0.1:63191_solr'," +
-        "                  'state': 'active'," +
-        "                  'type': 'NRT'," +
-        "                  'force_set_state': 'false'," +
-        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
-        "                  'shard': 'shard1'," +
-        "                  'collection': 'readApiTestViolations'" +
-        "                }" +
-        "              }" +
-        "            ]" +
-        "          }" +
-        "        }" +
-        "      }," +
-        "      {" +
-        "        'node': '127.0.0.1:63192_solr'," +
-        "        'isLive': true," +
-        "        'cores': 3.0," +
-        "        'freedisk': 680.908073425293," +
-        "        'heapUsage': 24.98878807983566," +
-        "        'sysLoadAvg': 272.75390625," +
-        "        'totaldisk': 1037.938980102539," +
-        "        'replicas': {" +
-        "          'readApiTestViolations': {" +
-        "            'shard1': [" +
-        "              {" +
-        "                'core_node3': {" +
-        "                  'core': 'readApiTestViolations_shard1_replica_n1'," +
-        "                  'base_url': 'https://127.0.0.1:63192/solr'," +
-        "                  'node_name': '127.0.0.1:63192_solr'," +
-        "                  'state': 'active'," +
-        "                  'type': 'NRT'," +
-        "                  'force_set_state': 'false'," +
-        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
-        "                  'shard': 'shard1'," +
-        "                  'collection': 'readApiTestViolations'" +
-        "                }" +
-        "              }," +
-        "              {" +
-        "                'core_node9': {" +
-        "                  'core': 'readApiTestViolations_shard1_replica_n6'," +
-        "                  'base_url': 'https://127.0.0.1:63192/solr'," +
-        "                  'node_name': '127.0.0.1:63192_solr'," +
-        "                  'state': 'active'," +
-        "                  'type': 'NRT'," +
-        "                  'force_set_state': 'false'," +
-        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
-        "                  'shard': 'shard1'," +
-        "                  'collection': 'readApiTestViolations'" +
-        "                }" +
-        "              }," +
-        "              {" +
-        "                'core_node11': {" +
-        "                  'core': 'readApiTestViolations_shard1_replica_n8'," +
-        "                  'base_url': 'https://127.0.0.1:63192/solr'," +
-        "                  'node_name': '127.0.0.1:63192_solr'," +
-        "                  'state': 'active'," +
-        "                  'type': 'NRT'," +
-        "                  'force_set_state': 'false'," +
-        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
-        "                  'shard': 'shard1'," +
-        "                  'collection': 'readApiTestViolations'" +
-        "                }" +
-        "              }" +
-        "            ]" +
-        "          }" +
-        "        }" +
-        "      }," +
-        "      {" +
-        "        'node': '127.0.0.1:63219_solr'," +
-        "        'isLive': true," +
-        "        'cores': 0.0," +
-        "        'freedisk': 680.908073425293," +
-        "        'heapUsage': 24.98878807983566," +
-        "        'sysLoadAvg': 272.75390625," +
-        "        'totaldisk': 1037.938980102539," +
-        "        'replicas': {}" +
-        "      }" +
-        "    ]," +
-        "    'liveNodes': [" +
-        "      '127.0.0.1:63191_solr'," +
-        "      '127.0.0.1:63192_solr'," +
-        "      '127.0.0.1:63219_solr'" +
-        "    ]," +
-        "    'violations': [" +
-        "      {" +
-        "        'collection': 'readApiTestViolations'," +
-        "        'shard': 'shard1'," +
-        "        'node': '127.0.0.1:63191_solr'," +
-        "        'violation': {" +
-        "          'replica': {'NRT': 3, 'count': 3}," +
-        "          'delta': 2.0" +
-        "        }," +
-        "        'clause': {" +
-        "          'replica': '<3'," +
-        "          'shard': '#EACH'," +
-        "          'node': '#ANY'," +
-        "          'collection': 'readApiTestViolations'" +
-        "        }" +
-        "      }," +
-        "      {" +
-        "        'collection': 'readApiTestViolations'," +
-        "        'shard': 'shard1'," +
-        "        'node': '127.0.0.1:63192_solr'," +
-        "        'violation': {" +
-        "          'replica': {'NRT': 3, 'count': 3}," +
-        "          'delta': 2.0" +
-        "        }," +
-        "        'clause': {" +
-        "          'replica': '<2'," +
-        "          'shard': '#EACH'," +
-        "          'node': '#ANY'," +
-        "          'collection': 'readApiTestViolations'" +
-        "        }" +
-        "      }" +
-        "    ]," +
-        "    'config': {" +
-        "      'cluster-preferences': [" +
-        "        {'minimize': 'cores', 'precision': 3}," +
-        "        {'maximize': 'freedisk', 'precision': 100}," +
-        "        {'minimize': 'sysLoadAvg', 'precision': 10}," +
-        "        {'minimize': 'heapUsage', 'precision': 10}" +
-        "      ]," +
-        "      'cluster-policy': [" +
-        "        {'cores': '<10', 'node': '#ANY'}," +
-        "        {'replica': '<2', 'shard': '#EACH', 'node': '#ANY'}," +
-        "        {'nodeRole': 'overseer', 'replica': 0}" +
-        "      ]" +
-        "    }" +
-        "  }}";
-    Map<String, Object> m = (Map<String, Object>) Utils.fromJSONString(diagnostics);
+  public void testAutoScalingHandlerFailure() throws IOException {
+    Map<String, Object> m = (Map<String, Object>) loadFromResource("testAutoScalingHandlerFailure.json");
 
     Policy policy = new Policy((Map<String, Object>) Utils.getObjectByPath(m, false, "diagnostics/config"));
     SolrCloudManager cloudManagerFromDiagnostics = createCloudManagerFromDiagnostics(m);


[25/29] lucene-solr:jira/http2: SOLR-12709: Add TestSimExtremeIndexing for testing simulated large indexing jobs. Several important improvements to the simulator.

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
index ffdfff7..379011d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
@@ -109,7 +109,7 @@ public class TestSimPolicyCloud extends SimSolrCloudTestCase {
 
   public void testCreateCollectionAddReplica() throws Exception  {
     SolrClient solrClient = cluster.simGetSolrClient();
-    String nodeId = cluster.getSimClusterStateProvider().simGetRandomNode(random());
+    String nodeId = cluster.getSimClusterStateProvider().simGetRandomNode();
 
     int port = (Integer)cluster.getSimNodeStateProvider().simGetNodeValue(nodeId, ImplicitSnitch.PORT);
 
@@ -134,13 +134,13 @@ public class TestSimPolicyCloud extends SimSolrCloudTestCase {
 
   public void testCreateCollectionSplitShard() throws Exception  {
     SolrClient solrClient = cluster.simGetSolrClient();
-    String firstNode = cluster.getSimClusterStateProvider().simGetRandomNode(random());
+    String firstNode = cluster.getSimClusterStateProvider().simGetRandomNode();
     int firstNodePort = (Integer)cluster.getSimNodeStateProvider().simGetNodeValue(firstNode, ImplicitSnitch.PORT);
 
     String secondNode;
     int secondNodePort;
     while (true)  {
-      secondNode = cluster.getSimClusterStateProvider().simGetRandomNode(random());
+      secondNode = cluster.getSimClusterStateProvider().simGetRandomNode();
       secondNodePort = (Integer)cluster.getSimNodeStateProvider().simGetNodeValue(secondNode, ImplicitSnitch.PORT);
       if (secondNodePort != firstNodePort)  break;
     }
@@ -293,7 +293,7 @@ public class TestSimPolicyCloud extends SimSolrCloudTestCase {
 
   public void testCreateCollectionAddShardUsingPolicy() throws Exception {
     SolrClient solrClient = cluster.simGetSolrClient();
-    String nodeId = cluster.getSimClusterStateProvider().simGetRandomNode(random());
+    String nodeId = cluster.getSimClusterStateProvider().simGetRandomNode();
     int port = (Integer)cluster.getSimNodeStateProvider().simGetNodeValue(nodeId, ImplicitSnitch.PORT);
 
     String commands =  "{set-policy :{c1 : [{replica:1 , shard:'#EACH', port: '" + port + "'}]}}";
@@ -344,7 +344,7 @@ public class TestSimPolicyCloud extends SimSolrCloudTestCase {
       assertTrue("sysLoadAvg value is " + ((Number) val.get("sysLoadAvg")).doubleValue(), Double.compare(((Number) val.get("sysLoadAvg")).doubleValue(), 0.0d) > 0);
     }
     // simulator doesn't have Overseer, so just pick a random node
-    String overseerNode = cluster.getSimClusterStateProvider().simGetRandomNode(random());
+    String overseerNode = cluster.getSimClusterStateProvider().simGetRandomNode();
     solrClient.request(CollectionAdminRequest.addRole(overseerNode, "overseer"));
     for (int i = 0; i < 10; i++) {
       Map<String, Object> data = Utils.getJson(cluster.getDistribStateManager(), ZkStateReader.ROLES);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimTriggerIntegration.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimTriggerIntegration.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimTriggerIntegration.java
index 8acab5f..744155e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimTriggerIntegration.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimTriggerIntegration.java
@@ -482,7 +482,7 @@ public class TestSimTriggerIntegration extends SimSolrCloudTestCase {
       fail("The TriggerAction should have been created by now");
     }
 
-    String lostNodeName = cluster.getSimClusterStateProvider().simGetRandomNode(random());
+    String lostNodeName = cluster.getSimClusterStateProvider().simGetRandomNode();
     cluster.simRemoveNode(lostNodeName, false);
     boolean await = triggerFiredLatch.await(20000 / SPEED, TimeUnit.MILLISECONDS);
     assertTrue("The trigger did not fire at all", await);
@@ -650,7 +650,7 @@ public class TestSimTriggerIntegration extends SimSolrCloudTestCase {
         "'actions' : [{'name':'test','class':'" + TestEventQueueAction.class.getName() + "'}]" +
         "}}";
 
-    String overseerLeader = cluster.getSimClusterStateProvider().simGetRandomNode(random());
+    String overseerLeader = cluster.getSimClusterStateProvider().simGetRandomNode();
 
     SolrRequest req = createAutoScalingRequest(SolrRequest.METHOD.POST, setTriggerCommand);
     NamedList<Object> response = solrClient.request(req);
@@ -808,7 +808,7 @@ public class TestSimTriggerIntegration extends SimSolrCloudTestCase {
     SolrClient solrClient = cluster.simGetSolrClient();
 
     // pick overseer node
-    String overseerLeader = cluster.getSimClusterStateProvider().simGetRandomNode(random());
+    String overseerLeader = cluster.getSimClusterStateProvider().simGetRandomNode();
 
     // add a node
     String node = cluster.simAddNode();
@@ -867,7 +867,7 @@ public class TestSimTriggerIntegration extends SimSolrCloudTestCase {
     response = solrClient.request(req);
     assertEquals(response.get("result").toString(), "success");
 
-    overseerLeader = cluster.getSimClusterStateProvider().simGetRandomNode(random());
+    overseerLeader = cluster.getSimClusterStateProvider().simGetRandomNode();
 
     // create another node
     log.info("====== ADD NODE 1");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
index 2552f0a..b39951a 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
@@ -94,6 +94,7 @@ public class Policy implements MapWriter {
   final List<Pair<String, Type>> params;
   final List<String> perReplicaAttributes;
   final int zkVersion;
+  final boolean empty;
 
   public Policy() {
     this(Collections.emptyMap());
@@ -104,6 +105,7 @@ public class Policy implements MapWriter {
   }
   @SuppressWarnings("unchecked")
   public Policy(Map<String, Object> jsonMap, int version) {
+    this.empty = jsonMap.get(CLUSTER_PREFERENCES) == null && jsonMap.get(CLUSTER_POLICY) == null && jsonMap.get(POLICIES) == null;
     this.zkVersion = version;
     int[] idx = new int[1];
     List<Preference> initialClusterPreferences = ((List<Map<String, Object>>) jsonMap.getOrDefault(CLUSTER_PREFERENCES, emptyList())).stream()
@@ -156,6 +158,7 @@ public class Policy implements MapWriter {
   }
 
   private Policy(Map<String, List<Clause>> policies, List<Clause> clusterPolicy, List<Preference> clusterPreferences, int version) {
+    this.empty = policies == null && clusterPolicy == null && clusterPreferences == null;
     this.zkVersion = version;
     this.policies = policies != null ? Collections.unmodifiableMap(policies) : Collections.emptyMap();
     this.clusterPolicy = clusterPolicy != null ? Collections.unmodifiableList(clusterPolicy) : Collections.emptyList();
@@ -281,12 +284,17 @@ public class Policy implements MapWriter {
             return p.compare(r1, r2, false);
           });
         } catch (Exception e) {
+//          log.error("Exception! prefs = {}, recent r1 = {}, r2 = {}, matrix = {}",
+//              clusterPreferences,
+//              lastComparison[0],
+//              lastComparison[1],
+//              Utils.toJSONString(Utils.getDeepCopy(tmpMatrix, 6, false)));
           log.error("Exception! prefs = {}, recent r1 = {}, r2 = {}, matrix = {}",
               clusterPreferences,
-              lastComparison[0],
-              lastComparison[1],
-              Utils.toJSONString(Utils.getDeepCopy(tmpMatrix, 6, false)));
-          throw e;
+              lastComparison[0].node,
+              lastComparison[1].node,
+              matrix.size());
+          throw new RuntimeException(e.getMessage());
         }
         p.setApproxVal(tmpMatrix);
       }
@@ -405,15 +413,6 @@ public class Policy implements MapWriter {
       return currentSession.getViolations();
     }
 
-    public boolean undo() {
-      if (currentSession.parent != null) {
-        currentSession = currentSession.parent;
-        return true;
-      }
-      return false;
-    }
-
-
     public Session getCurrentSession() {
       return currentSession;
     }
@@ -461,6 +460,10 @@ public class Policy implements MapWriter {
     return Utils.toJSONString(this);
   }
 
+  public boolean isEmpty() {
+    return empty;
+  }
+
   /*This stores the logical state of the system, given a policy and
    * a cluster state.
    *
@@ -475,12 +478,10 @@ public class Policy implements MapWriter {
     List<Clause> expandedClauses;
     List<Violation> violations = new ArrayList<>();
     Transaction transaction;
-    private Session parent = null;
 
     private Session(List<String> nodes, SolrCloudManager cloudManager,
                     List<Row> matrix, List<Clause> expandedClauses, int znodeVersion,
-                    NodeStateProvider nodeStateProvider, Transaction transaction, Session parent) {
-      this.parent = parent;
+                    NodeStateProvider nodeStateProvider, Transaction transaction) {
       this.transaction = transaction;
       this.nodes = nodes;
       this.cloudManager = cloudManager;
@@ -552,7 +553,7 @@ public class Policy implements MapWriter {
     }
 
     Session copy() {
-      return new Session(nodes, cloudManager, getMatrixCopy(), expandedClauses, znodeVersion, nodeStateProvider, transaction, this);
+      return new Session(nodes, cloudManager, getMatrixCopy(), expandedClauses, znodeVersion, nodeStateProvider, transaction);
     }
 
     public Row getNode(String node) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
index 29ccc65..123d144 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
@@ -351,7 +351,7 @@ public class PolicyHelper {
       TimeSource timeSource = sessionWrapper.session != null ? sessionWrapper.session.cloudManager.getTimeSource() : TimeSource.NANO_TIME;
       synchronized (lockObj) {
         sessionWrapper.status = Status.EXECUTING;
-        log.info("returnSession, curr-time {} sessionWrapper.createTime {}, this.sessionWrapper.createTime {} ", time(timeSource, MILLISECONDS),
+        log.debug("returnSession, curr-time {} sessionWrapper.createTime {}, this.sessionWrapper.createTime {} ", time(timeSource, MILLISECONDS),
             sessionWrapper.createTime,
             this.sessionWrapper.createTime);
         if (sessionWrapper.createTime == this.sessionWrapper.createTime) {
@@ -362,7 +362,7 @@ public class PolicyHelper {
           //one thread who is waiting for this need to be notified.
           lockObj.notify();
         } else {
-          log.info("create time NOT SAME {} ", SessionWrapper.DEFAULT_INSTANCE.createTime);
+          log.debug("create time NOT SAME {} ", SessionWrapper.DEFAULT_INSTANCE.createTime);
           //else just ignore it
         }
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2369c896/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
index 97f2521..ca83ad4 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
@@ -49,10 +49,12 @@ public class ReplicaInfo implements MapWriter {
     this.collection = coll;
     this.shard = shard;
     this.type = r.getType();
-    this.isLeader = r.getBool(LEADER_PROP, false);
+    boolean maybeLeader = r.getBool(LEADER_PROP, false);
     if (vals != null) {
       this.variables.putAll(vals);
+      maybeLeader = "true".equals(String.valueOf(vals.getOrDefault(LEADER_PROP, maybeLeader)));
     }
+    this.isLeader = maybeLeader;
     this.node = r.getNodeName();
   }
 


[09/29] lucene-solr:jira/http2: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/lucene-solr

Posted by da...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/lucene-solr


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e6e3dc7e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e6e3dc7e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e6e3dc7e

Branch: refs/heads/jira/http2
Commit: e6e3dc7ea8c626f0f4a1173f899a9d7bb1af26d1
Parents: 63fcf2e 4ccf0fb
Author: Karl Wright <Da...@gmail.com>
Authored: Sun Sep 23 06:45:59 2018 -0400
Committer: Karl Wright <Da...@gmail.com>
Committed: Sun Sep 23 06:45:59 2018 -0400

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   3 +
 .../org/apache/lucene/index/MultiFields.java    |  27 +-
 solr/CHANGES.txt                                |  14 +-
 .../cloud/api/collections/AddReplicaCmd.java    | 334 +++++++++++++------
 .../solr/cloud/api/collections/Assign.java      |  22 +-
 .../cloud/api/collections/CreateShardCmd.java   | 157 +++------
 .../cloud/api/collections/MoveReplicaCmd.java   |   2 +-
 .../OverseerCollectionMessageHandler.java       |   2 +-
 .../cloud/api/collections/ReplaceNodeCmd.java   |   2 +-
 .../solr/handler/admin/CollectionsHandler.java  |   9 +-
 .../org/apache/solr/cloud/AddReplicaTest.java   |  90 ++++-
 .../CollectionTooManyReplicasTest.java          |   2 +-
 .../cloud/api/collections/ShardSplitTest.java   |   2 +-
 .../sim/SimClusterStateProvider.java            |  93 ++++--
 .../autoscaling/sim/TestSimPolicyCloud.java     |   2 +-
 solr/solr-ref-guide/src/collections-api.adoc    |  68 +++-
 .../solrj/request/CollectionAdminRequest.java   |  53 +++
 17 files changed, 576 insertions(+), 306 deletions(-)
----------------------------------------------------------------------



[08/29] lucene-solr:jira/http2: LUCENE-8512: Remove second test point since no longer needed, and confirm rigorously that first test point is not on an edge.

Posted by da...@apache.org.
LUCENE-8512: Remove second test point since no longer needed, and confirm rigorously that first test point is not on an edge.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/63fcf2ed
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/63fcf2ed
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/63fcf2ed

Branch: refs/heads/jira/http2
Commit: 63fcf2edcaa2d203a65f03365bb17001a6ee094f
Parents: af2de93
Author: Karl Wright <Da...@gmail.com>
Authored: Sun Sep 23 06:45:26 2018 -0400
Committer: Karl Wright <Da...@gmail.com>
Committed: Sun Sep 23 06:45:26 2018 -0400

----------------------------------------------------------------------
 .../spatial3d/geom/GeoComplexPolygon.java       | 115 +++----------------
 .../spatial3d/geom/GeoPolygonFactory.java       |  15 ++-
 .../lucene/spatial3d/geom/GeoPolygonTest.java   |   2 +-
 3 files changed, 26 insertions(+), 106 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/63fcf2ed/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
index a7c55dd..41e2529 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
@@ -48,9 +48,6 @@ class GeoComplexPolygon extends GeoBasePolygon {
   private final boolean testPoint1InSet;
   private final GeoPoint testPoint1;
 
-  private final boolean testPoint2InSet;
-  private final GeoPoint testPoint2;
-    
   private final Plane testPoint1FixedYPlane;
   private final Plane testPoint1FixedYAbovePlane;
   private final Plane testPoint1FixedYBelowPlane;
@@ -61,16 +58,6 @@ class GeoComplexPolygon extends GeoBasePolygon {
   private final Plane testPoint1FixedZAbovePlane;
   private final Plane testPoint1FixedZBelowPlane;
 
-  private final Plane testPoint2FixedYPlane;
-  private final Plane testPoint2FixedYAbovePlane;
-  private final Plane testPoint2FixedYBelowPlane;
-  private final Plane testPoint2FixedXPlane;
-  private final Plane testPoint2FixedXAbovePlane;
-  private final Plane testPoint2FixedXBelowPlane;
-  private final Plane testPoint2FixedZPlane;
-  private final Plane testPoint2FixedZAbovePlane;
-  private final Plane testPoint2FixedZBelowPlane;
-  
   private final GeoPoint[] edgePoints;
   private final Edge[] shapeStartEdges;
   
@@ -108,6 +95,9 @@ class GeoComplexPolygon extends GeoBasePolygon {
       for (final GeoPoint thisGeoPoint : shapePoints) {
         assert planetModel.pointOnSurface(thisGeoPoint) : "Polygon edge point must be on surface; "+thisGeoPoint+" is not";
         final Edge edge = new Edge(planetModel, lastGeoPoint, thisGeoPoint);
+        if (edge.isWithin(testPoint.x, testPoint.y, testPoint.z)) {
+          throw new IllegalArgumentException("Test point is on polygon edge: not allowed");
+        }
         allEdges.add(edge);
         // Now, link
         if (firstEdge == null) {
@@ -132,10 +122,6 @@ class GeoComplexPolygon extends GeoBasePolygon {
 
     // Record testPoint1 as-is
     this.testPoint1 = testPoint;
-    // Pick the antipodes for testPoint2
-    this.testPoint2 = new GeoPoint(-testPoint.x, -testPoint.y, -testPoint.z);
-    
-    assert planetModel.pointOnSurface(testPoint2.x, testPoint2.y, testPoint2.z) : "Test point 2 is off of ellipsoid";
 
     // Construct fixed planes for testPoint1
     this.testPoint1FixedYPlane = new Plane(0.0, 1.0, 0.0, -testPoint1.y);
@@ -181,69 +167,8 @@ class GeoComplexPolygon extends GeoBasePolygon {
     }
     this.testPoint1FixedZBelowPlane = testPoint1FixedZBelowPlane;
 
-    // Construct fixed planes for testPoint2
-    this.testPoint2FixedYPlane = new Plane(0.0, 1.0, 0.0, -testPoint2.y);
-    this.testPoint2FixedXPlane = new Plane(1.0, 0.0, 0.0, -testPoint2.x);
-    this.testPoint2FixedZPlane = new Plane(0.0, 0.0, 1.0, -testPoint2.z);
-    
-    Plane testPoint2FixedYAbovePlane = new Plane(testPoint2FixedYPlane, true);
-    if (-testPoint2FixedYAbovePlane.D - planetModel.getMaximumYValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumYValue() + testPoint2FixedYAbovePlane.D > NEAR_EDGE_CUTOFF) {
-        testPoint2FixedYAbovePlane = null;
-    }
-    this.testPoint2FixedYAbovePlane = testPoint2FixedYAbovePlane;
-    
-    Plane testPoint2FixedYBelowPlane = new Plane(testPoint2FixedYPlane, false);
-    if (-testPoint2FixedYBelowPlane.D - planetModel.getMaximumYValue() > NEAR_EDGE_CUTOFF ||  planetModel.getMinimumYValue() + testPoint2FixedYBelowPlane.D > NEAR_EDGE_CUTOFF) {
-        testPoint2FixedYBelowPlane = null;
-    }
-    this.testPoint2FixedYBelowPlane = testPoint2FixedYBelowPlane;
-    
-    Plane testPoint2FixedXAbovePlane = new Plane(testPoint2FixedXPlane, true);
-    if (-testPoint2FixedXAbovePlane.D - planetModel.getMaximumXValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumXValue() + testPoint2FixedXAbovePlane.D > NEAR_EDGE_CUTOFF) {
-        testPoint2FixedXAbovePlane = null;
-    }
-    this.testPoint2FixedXAbovePlane = testPoint2FixedXAbovePlane;
-    
-    Plane testPoint2FixedXBelowPlane = new Plane(testPoint2FixedXPlane, false);
-    if (-testPoint2FixedXBelowPlane.D - planetModel.getMaximumXValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumXValue() + testPoint2FixedXBelowPlane.D > NEAR_EDGE_CUTOFF) {
-        testPoint2FixedXBelowPlane = null;
-    }
-    this.testPoint2FixedXBelowPlane = testPoint2FixedXBelowPlane;
-    
-    Plane testPoint2FixedZAbovePlane = new Plane(testPoint2FixedZPlane, true);
-    if (-testPoint2FixedZAbovePlane.D - planetModel.getMaximumZValue() > NEAR_EDGE_CUTOFF ||planetModel.getMinimumZValue() + testPoint2FixedZAbovePlane.D > NEAR_EDGE_CUTOFF) {
-        testPoint2FixedZAbovePlane = null;
-    }
-    this.testPoint2FixedZAbovePlane = testPoint2FixedZAbovePlane;
-    
-    Plane testPoint2FixedZBelowPlane = new Plane(testPoint2FixedZPlane, false);
-    if (-testPoint2FixedZBelowPlane.D - planetModel.getMaximumZValue() > NEAR_EDGE_CUTOFF || planetModel.getMinimumZValue() + testPoint2FixedZBelowPlane.D > NEAR_EDGE_CUTOFF) {
-        testPoint2FixedZBelowPlane = null;
-    }
-    this.testPoint2FixedZBelowPlane = testPoint2FixedZBelowPlane;
-
     // We know inset/out-of-set for testPoint1 only right now
     this.testPoint1InSet = testPointInSet;
-
-    //System.out.println("Determining in-set-ness of test point2 ("+testPoint2+"):");
-    // We must compute the crossings from testPoint1 to testPoint2 in order to figure out whether testPoint2 is in-set or out
-    this.testPoint2InSet = isInSet(testPoint2.x, testPoint2.y, testPoint2.z,
-      testPoint1, 
-      testPoint1InSet,
-      testPoint1FixedXPlane, testPoint1FixedXAbovePlane, testPoint1FixedXBelowPlane,
-      testPoint1FixedYPlane, testPoint1FixedYAbovePlane, testPoint1FixedYBelowPlane,
-      testPoint1FixedZPlane, testPoint1FixedZAbovePlane, testPoint1FixedZBelowPlane);
-    
-    //System.out.println("\n... done.  Checking against test point1 ("+testPoint1+"):");
-    
-    assert isInSet(testPoint1.x, testPoint1.y, testPoint1.z,
-      testPoint2,
-      testPoint2InSet,
-      testPoint2FixedXPlane, testPoint2FixedXAbovePlane, testPoint2FixedXBelowPlane,
-      testPoint2FixedYPlane, testPoint2FixedYAbovePlane, testPoint2FixedYBelowPlane,
-      testPoint2FixedZPlane, testPoint2FixedZAbovePlane, testPoint2FixedZBelowPlane) == testPoint1InSet : "Test point1 not correctly in/out of set according to test point2";
-
-    //System.out.println("\n... done");
   }
 
   /**
@@ -284,27 +209,12 @@ class GeoComplexPolygon extends GeoBasePolygon {
   @Override
   public boolean isWithin(final double x, final double y, final double z) {
     //System.out.println("IsWithin() for ["+x+","+y+","+z+"]");
-    try {
-      // Try with the primary test point
-      //if (true) throw new IllegalArgumentException("use second point as exercise");
-      //System.out.println(" Trying testPoint1...");
-      return isInSet(x, y, z,
-        testPoint1,
-        testPoint1InSet,
-        testPoint1FixedXPlane, testPoint1FixedXAbovePlane, testPoint1FixedXBelowPlane,
-        testPoint1FixedYPlane, testPoint1FixedYAbovePlane, testPoint1FixedYBelowPlane,
-        testPoint1FixedZPlane, testPoint1FixedZAbovePlane, testPoint1FixedZBelowPlane);
-    } catch (IllegalArgumentException e) {
-      // Try with an alternate test point
-      //e.printStackTrace(System.out);
-      //System.out.println(" Trying testPoint2...");
-      return isInSet(x, y, z,
-        testPoint2,
-        testPoint2InSet,
-        testPoint2FixedXPlane, testPoint2FixedXAbovePlane, testPoint2FixedXBelowPlane,
-        testPoint2FixedYPlane, testPoint2FixedYAbovePlane, testPoint2FixedYBelowPlane,
-        testPoint2FixedZPlane, testPoint2FixedZAbovePlane, testPoint2FixedZBelowPlane);
-    }
+    return isInSet(x, y, z,
+      testPoint1,
+      testPoint1InSet,
+      testPoint1FixedXPlane, testPoint1FixedXAbovePlane, testPoint1FixedXBelowPlane,
+      testPoint1FixedYPlane, testPoint1FixedYAbovePlane, testPoint1FixedYBelowPlane,
+      testPoint1FixedZPlane, testPoint1FixedZAbovePlane, testPoint1FixedZBelowPlane);
   }
   
   /** Given a test point, whether it is in set, and the associated planes, figure out if another point
@@ -805,7 +715,12 @@ class GeoComplexPolygon extends GeoBasePolygon {
         return edgeIterator.isOnEdge() || (((edgeIterator.getCrossingCount() & 1) == 0)?testPointInSet:!testPointInSet);
       }
     }
-    
+
+    @Override
+    public String toString() {
+      return "{firstLegValue="+firstLegValue+"; secondLegValue="+secondLegValue+"; firstLegPlane="+firstLegPlane+"; secondLegPlane="+secondLegPlane+"; intersectionPoint="+intersectionPoint+"}";
+    }
+
     @Override
     public int compareTo(final TraversalStrategy other) {
       if (traversalDistance < other.traversalDistance) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/63fcf2ed/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java
index 301d1cc..1937b07 100755
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java
@@ -458,11 +458,16 @@ public class GeoPolygonFactory {
       // Is it inside or outside?
       final Boolean isTestPointInside = isInsidePolygon(testPoint, points);
       if (isTestPointInside != null) {
-        // Legal pole
-        if (isTestPointInside == poleMustBeInside) {
-          return new GeoComplexPolygon(planetModel, pointsList, testPoint, isTestPointInside);
-        } else {
-          return new GeoComplexPolygon(planetModel, pointsList, new GeoPoint(-testPoint.x, -testPoint.y, -testPoint.z), !isTestPointInside);
+        try {
+          // Legal pole
+          if (isTestPointInside == poleMustBeInside) {
+            return new GeoComplexPolygon(planetModel, pointsList, testPoint, isTestPointInside);
+          } else {
+            return new GeoComplexPolygon(planetModel, pointsList, new GeoPoint(-testPoint.x, -testPoint.y, -testPoint.z), !isTestPointInside);
+          }
+        } catch (IllegalArgumentException e) {
+          // Probably bad choice of test point.
+          return null;
         }
       }
       // If pole choice was illegal, try another one

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/63fcf2ed/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
index 69be3d8..f2388a9 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
@@ -1911,7 +1911,7 @@ shape:
 
   }
   
-  @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8512")
+  //@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8512")
   public void testLUCENE8512() {
     //POLYGON((35.4190030282028 -67.85799140154762,35.420218772379776 -67.85786846162631,35.42021877254679 -67.85786846168897,35.420218772734266 -67.85786846168025,35.4190030282028 -67.85799140154762))
     final List<GeoPoint> points = new ArrayList<>();


[04/29] lucene-solr:jira/http2: LUCENE-8512: Add disabled test for failure.

Posted by da...@apache.org.
LUCENE-8512: Add disabled test for failure.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/af2de934
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/af2de934
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/af2de934

Branch: refs/heads/jira/http2
Commit: af2de93451bbf0cfffb00dc785feef1a94633644
Parents: dd088fb
Author: Karl Wright <Da...@gmail.com>
Authored: Fri Sep 21 03:46:38 2018 -0400
Committer: Karl Wright <Da...@gmail.com>
Committed: Fri Sep 21 03:46:38 2018 -0400

----------------------------------------------------------------------
 .../lucene/spatial3d/geom/GeoPolygonTest.java     | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/af2de934/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
index dbdfa5e..69be3d8 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
@@ -1910,5 +1910,23 @@ shape:
     assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point));
 
   }
+  
+  @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8512")
+  public void testLUCENE8512() {
+    //POLYGON((35.4190030282028 -67.85799140154762,35.420218772379776 -67.85786846162631,35.42021877254679 -67.85786846168897,35.420218772734266 -67.85786846168025,35.4190030282028 -67.85799140154762))
+    final List<GeoPoint> points = new ArrayList<>();
+    points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-67.85799140154762), Geo3DUtil.fromDegrees(35.4190030282028)));
+    points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-67.85786846162631), Geo3DUtil.fromDegrees(35.420218772379776)));
+    points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-67.85786846168897), Geo3DUtil.fromDegrees(35.42021877254679)));
+    points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-67.85786846168025), Geo3DUtil.fromDegrees(35.420218772734266)));
+
+    final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points);
+    final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description);
+    final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description));
 
+    //POINT(179.99999999999983 -5.021400461974724E-11)
+    final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.021400461974724E-11), Geo3DUtil.fromDegrees(179.99999999999983));
+    assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point));
+
+  }
 }


[14/29] lucene-solr:jira/http2: SOLR-12759: fix regexp

Posted by da...@apache.org.
SOLR-12759: fix regexp


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9bc4b8d4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9bc4b8d4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9bc4b8d4

Branch: refs/heads/jira/http2
Commit: 9bc4b8d4fe3bb220ca3a27fb252b703b39443a3c
Parents: 3f2975c
Author: David Smiley <ds...@apache.org>
Authored: Mon Sep 24 15:21:59 2018 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Mon Sep 24 15:21:59 2018 -0400

----------------------------------------------------------------------
 .../solr/handler/extraction/ExtractingRequestHandlerTest.java  | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9bc4b8d4/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java b/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
index 4401745..132b371 100644
--- a/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
+++ b/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
@@ -45,8 +45,10 @@ public class ExtractingRequestHandlerTest extends SolrTestCaseJ4 {
   @BeforeClass
   public static void beforeClass() throws Exception {
     // Is the JDK/env affected by a known bug?
-    if (!TimeZone.getDefault().getDisplayName(false, TimeZone.SHORT, Locale.US).matches("[A-Z]{3,4}([+-]\\d\\d(:\\d\\d)?)?")) {
-      assert System.getProperty("java.version").startsWith("11") : "Is some other JVM affected?  Or bad regex?";
+    final String tzDisplayName = TimeZone.getDefault().getDisplayName(false, TimeZone.SHORT, Locale.US);
+    if (!tzDisplayName.matches("[A-Z]{3,}([+-]\\d\\d(:\\d\\d)?)?")) {
+      assertTrue("Is some other JVM affected?  Or bad regex? TzDisplayName: " + tzDisplayName,
+          System.getProperty("java.version").startsWith("11"));
       assumeTrue("SOLR-12759 JDK 11 (1st release) and Tika 1.x can result in extracting dates in a bad format.", false);
     }
 


[03/29] lucene-solr:jira/http2: SOLR-12028: BadApple and AwaitsFix annotations usage

Posted by da...@apache.org.
SOLR-12028: BadApple and AwaitsFix annotations usage


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/dd088fb8
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/dd088fb8
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/dd088fb8

Branch: refs/heads/jira/http2
Commit: dd088fb83eeb48c752cc69a5dd173aa1c5224bc9
Parents: 1d604d1
Author: Erick Erickson <Er...@gmail.com>
Authored: Thu Sep 20 22:31:49 2018 -0700
Committer: Erick Erickson <Er...@gmail.com>
Committed: Thu Sep 20 22:31:49 2018 -0700

----------------------------------------------------------------------
 .../test/org/apache/solr/cloud/MoveReplicaHDFSTest.java  |  3 ++-
 .../test/org/apache/solr/cloud/MultiThreadedOCPTest.java |  2 +-
 .../solr/cloud/TestLeaderInitiatedRecoveryThread.java    |  1 +
 .../apache/solr/cloud/TestSkipOverseerOperations.java    |  3 +++
 .../solr/cloud/TestSolrCloudWithDelegationTokens.java    |  2 +-
 .../test/org/apache/solr/cloud/TestWithCollection.java   |  2 ++
 .../org/apache/solr/cloud/UnloadDistributedZkTest.java   |  1 +
 .../CollectionsAPIAsyncDistributedZkTest.java            |  1 +
 .../solr/cloud/autoscaling/ScheduledTriggerTest.java     |  1 +
 .../org/apache/solr/handler/TestReplicationHandler.java  |  2 +-
 .../handler/component/DistributedDebugComponentTest.java |  4 +++-
 .../org/apache/solr/client/solrj/SolrExceptionTest.java  |  3 +++
 .../client/solrj/beans/TestDocumentObjectBinder.java     |  4 ++++
 .../client/solrj/impl/CloudSolrClientBuilderTest.java    |  6 ++++++
 .../solrj/impl/CloudSolrClientMultiConstructorTest.java  |  3 +++
 .../impl/ConcurrentUpdateSolrClientBuilderTest.java      |  1 +
 .../solr/client/solrj/impl/HttpClientUtilTest.java       |  6 +++++-
 .../solr/client/solrj/impl/LBHttpSolrClientTest.java     |  2 ++
 .../solr/client/solrj/io/stream/MathExpressionTest.java  |  1 +
 .../solrj/io/stream/StreamExpressionToExpessionTest.java |  2 ++
 .../io/stream/StreamExpressionToExplanationTest.java     |  2 ++
 .../client/solrj/request/TestCollectionAdminRequest.java |  5 +++++
 .../client/solrj/request/TestUpdateRequestCodec.java     |  5 ++++-
 .../solr/client/solrj/request/TestV1toV2ApiMapper.java   | 11 +++++++++--
 .../solr/client/solrj/response/QueryResponseTest.java    |  6 +++++-
 .../solrj/response/TestDelegationTokenResponse.java      |  2 ++
 .../org/apache/solr/common/TestToleratedUpdateError.java |  5 ++++-
 .../org/apache/solr/common/params/ShardParamsTest.java   |  5 ++++-
 .../test/org/apache/solr/common/util/NamedListTest.java  |  7 ++++++-
 .../org/apache/solr/common/util/TestFastInputStream.java |  1 +
 .../org/apache/solr/common/util/TestNamedListCodec.java  | 11 +++++++++--
 31 files changed, 95 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java b/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
index 557810f..989d39b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
@@ -69,7 +69,8 @@ public class MoveReplicaHDFSTest extends MoveReplicaTest {
   //commented 9-Aug-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Jul-2018
   //commented 23-AUG-2018  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 17-Aug-2018
   // commented 4-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 23-Aug-2018
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 6-Sep-2018
+  //commented 20-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 6-Sep-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testNormalFailedMove() throws Exception {
     inPlaceMove = false;
     testFailedMove();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java b/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java
index 6fbd97a..7621c02 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java
@@ -59,7 +59,7 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
 
   @Test
 // commented 20-July-2018   @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 17-Aug-2018
+//commented 20-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 17-Aug-2018
   @ShardsFixed(num = 4)
   public void test() throws Exception {
     testParallelCollectionAPICalls();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java b/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java
index e4f4cda..ce9d9ad 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java
@@ -46,6 +46,7 @@ public class TestLeaderInitiatedRecoveryThread extends AbstractFullDistribZkTest
 
   @Test
   //17-Aug-2018 commented @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 09-Apr-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testPublishDownState() throws Exception {
     waitForRecoveriesToFinish(true);
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/core/src/test/org/apache/solr/cloud/TestSkipOverseerOperations.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestSkipOverseerOperations.java b/solr/core/src/test/org/apache/solr/cloud/TestSkipOverseerOperations.java
index 2039249..c18fb92 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestSkipOverseerOperations.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestSkipOverseerOperations.java
@@ -26,6 +26,7 @@ import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.junit.BeforeClass;
+import org.junit.Test;
 
 public class TestSkipOverseerOperations extends SolrCloudTestCase {
 
@@ -73,6 +74,8 @@ public class TestSkipOverseerOperations extends SolrCloudTestCase {
     CollectionAdminRequest.deleteCollection(collection).process(cluster.getSolrClient());
   }
 
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testSkipDownOperations() throws Exception {
     String overseerLeader = getOverseerLeader();
     List<JettySolrRunner> notOverseerNodes = cluster.getJettySolrRunners()

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithDelegationTokens.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithDelegationTokens.java b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithDelegationTokens.java
index c6aac78..9e260d2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithDelegationTokens.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithDelegationTokens.java
@@ -327,7 +327,7 @@ public class TestSolrCloudWithDelegationTokens extends SolrTestCaseJ4 {
   }
 
   @Test
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 23-Aug-2018
+  //commented 20-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 23-Aug-2018
   public void testDelegationTokenRenew() throws Exception {
     // test with specifying renewer
     verifyDelegationTokenRenew("bar", "bar");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java b/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java
index d21b32b..b23d035 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java
@@ -260,6 +260,8 @@ public class TestWithCollection extends SolrCloudTestCase {
     assertTrue(collection.getReplicas().stream().noneMatch(replica -> withCollection.getReplicas(replica.getNodeName()).isEmpty()));
   }
 
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testAddReplicaWithPolicy() throws Exception {
     String prefix = "testAddReplicaWithPolicy";
     String xyz = prefix + "_xyz";

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
index e34d4f7..3e0e71a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
@@ -65,6 +65,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
 
   @Test
   //28-June-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void test() throws Exception {
     
     testCoreUnloadAndLeaders(); // long

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
index 0cd0456..00113a9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
@@ -89,6 +89,7 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
 
   @Test
   //commented 9-Aug-2018  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testAsyncRequests() throws Exception {
 
     final String collection = "testAsyncOperations";

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledTriggerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledTriggerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledTriggerTest.java
index bff2f5a..f4344cf 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledTriggerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledTriggerTest.java
@@ -58,6 +58,7 @@ public class ScheduledTriggerTest extends SolrCloudTestCase {
   @Test
 //2018-06-18 (commented)   @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 09-Apr-2018
 //commented 23-AUG-2018   @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Jul-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testTrigger() throws Exception {
     CoreContainer container = cluster.getJettySolrRunners().get(0).getCoreContainer();
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
index 28ed3d1..562547c 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
@@ -832,7 +832,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
   
   
   @Test
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 17-Aug-2018
+  //commented 20-Sep-2018  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 17-Aug-2018
   public void doTestStressReplication() throws Exception {
     // change solrconfig on slave
     // this has no entry for pollinginterval

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/core/src/test/org/apache/solr/handler/component/DistributedDebugComponentTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedDebugComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedDebugComponentTest.java
index c0a40af..105c0b5 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/DistributedDebugComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedDebugComponentTest.java
@@ -342,7 +342,9 @@ public class DistributedDebugComponentTest extends SolrJettyTestBase {
     response = client.query(query);
     assertNull(response.getDebugMap());
   }
-  
+
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testCompareWithNonDistributedRequest() throws SolrServerException, IOException {
     SolrQuery query = new SolrQuery();
     query.setQuery("id:1 OR id:2");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExceptionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExceptionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExceptionTest.java
index 358dcc7..c3dec85 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExceptionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExceptionTest.java
@@ -22,6 +22,7 @@ import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.junit.Test;
 
 /**
  * 
@@ -30,6 +31,8 @@ import org.apache.solr.client.solrj.impl.HttpSolrClient;
  */
 public class SolrExceptionTest extends LuceneTestCase {
 
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testSolrException() throws Throwable {
     // test a connection to a solr server that probably doesn't exist
     // this is a very simple test and most of the test should be considered verified 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/beans/TestDocumentObjectBinder.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/beans/TestDocumentObjectBinder.java b/solr/solrj/src/test/org/apache/solr/client/solrj/beans/TestDocumentObjectBinder.java
index ee55521..327a034 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/beans/TestDocumentObjectBinder.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/beans/TestDocumentObjectBinder.java
@@ -36,6 +36,8 @@ import java.util.Map;
 
 public class TestDocumentObjectBinder extends LuceneTestCase {
 
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testSimple() throws Exception {
     DocumentObjectBinder binder = new DocumentObjectBinder();
     XMLResponseParser parser = new XMLResponseParser();
@@ -79,6 +81,8 @@ public class TestDocumentObjectBinder extends LuceneTestCase {
     assertEquals("hello", l.get(0).categories[0]);
   }
 
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testDynamicFieldBinding() {
     DocumentObjectBinder binder = new DocumentObjectBinder();
     XMLResponseParser parser = new XMLResponseParser();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java
index ad6660c..833d4b3 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java
@@ -40,6 +40,7 @@ public class CloudSolrClientBuilderTest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testSingleZkHostSpecified() throws IOException {
     try(CloudSolrClient createdClient = new Builder(Collections.singletonList(ANY_ZK_HOST), Optional.of(ANY_CHROOT))
         .build()) {
@@ -50,6 +51,7 @@ public class CloudSolrClientBuilderTest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testSeveralZkHostsSpecifiedSingly() throws IOException {
     final List<String> zkHostList = new ArrayList<>();
     zkHostList.add(ANY_ZK_HOST); zkHostList.add(ANY_OTHER_ZK_HOST);
@@ -63,6 +65,7 @@ public class CloudSolrClientBuilderTest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testSeveralZkHostsSpecifiedTogether() throws IOException {
     final ArrayList<String> zkHosts = new ArrayList<String>();
     zkHosts.add(ANY_ZK_HOST);
@@ -76,6 +79,7 @@ public class CloudSolrClientBuilderTest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testByDefaultConfiguresClientToSendUpdatesOnlyToShardLeaders() throws IOException {
     try(CloudSolrClient createdClient = new Builder(Collections.singletonList(ANY_ZK_HOST), Optional.of(ANY_CHROOT)).build()) {
       assertTrue(createdClient.isUpdatesToLeaders() == true);
@@ -83,6 +87,7 @@ public class CloudSolrClientBuilderTest extends LuceneTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testIsDirectUpdatesToLeadersOnlyDefault() throws IOException {
     try(CloudSolrClient createdClient = new Builder(Collections.singletonList(ANY_ZK_HOST), Optional.of(ANY_CHROOT)).build()) {
       assertFalse(createdClient.isDirectUpdatesToLeadersOnly());
@@ -90,6 +95,7 @@ public class CloudSolrClientBuilderTest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void test0Timeouts() throws IOException {
     try(CloudSolrClient createdClient = new Builder(Collections.singletonList(ANY_ZK_HOST), Optional.empty())
         .withSocketTimeout(0)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
index dd0328c..d923053 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
@@ -37,6 +37,7 @@ public class CloudSolrClientMultiConstructorTest extends LuceneTestCase {
   Collection<String> hosts;
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testZkConnectionStringSetterWithValidChroot() throws IOException {
     boolean setOrList = random().nextBoolean();
     int numOfZKServers = TestUtil.nextInt(random(), 1, 5);
@@ -75,6 +76,7 @@ public class CloudSolrClientMultiConstructorTest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testZkConnectionStringConstructorWithValidChroot() throws IOException {
     int numOfZKServers = TestUtil.nextInt(random(), 1, 5);
     boolean withChroot = random().nextBoolean();
@@ -102,6 +104,7 @@ public class CloudSolrClientMultiConstructorTest extends LuceneTestCase {
   }
   
   @Test(expected = IllegalArgumentException.class)
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testBadChroot() {
     final List<String> zkHosts = new ArrayList<>();
     zkHosts.add("host1:2181");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientBuilderTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientBuilderTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientBuilderTest.java
index 504537b..65c2f1f 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientBuilderTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientBuilderTest.java
@@ -32,6 +32,7 @@ public class ConcurrentUpdateSolrClientBuilderTest extends LuceneTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testMissingQueueSize() {
     try (ConcurrentUpdateSolrClient client = new Builder("someurl").build()){
       // Do nothing as we just need to test that the only mandatory parameter for building the client

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpClientUtilTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpClientUtilTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpClientUtilTest.java
index ce2f8b7..8eaba22 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpClientUtilTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpClientUtilTest.java
@@ -44,7 +44,9 @@ public class HttpClientUtilTest extends LuceneTestCase {
   public void resetHttpClientBuilder() {
     HttpClientUtil.resetHttpClientBuilder();
   }
-    
+
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testSSLSystemProperties() throws IOException {
     
     assertNotNull("HTTPS scheme could not be created using system defaults",
@@ -83,6 +85,7 @@ public class HttpClientUtilTest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testToBooleanDefaultIfNull() throws Exception {
     assertFalse(HttpClientUtil.toBooleanDefaultIfNull(Boolean.FALSE, true));
     assertTrue(HttpClientUtil.toBooleanDefaultIfNull(Boolean.TRUE, false));
@@ -91,6 +94,7 @@ public class HttpClientUtilTest extends LuceneTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testToBooleanObject() throws Exception {
     assertEquals(Boolean.TRUE, HttpClientUtil.toBooleanObject("true"));
     assertEquals(Boolean.TRUE, HttpClientUtil.toBooleanObject("TRUE"));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/impl/LBHttpSolrClientTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/LBHttpSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/LBHttpSolrClientTest.java
index 34804c4..cdc4aee 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/LBHttpSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/LBHttpSolrClientTest.java
@@ -17,6 +17,7 @@
 package org.apache.solr.client.solrj.impl;
 
 import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.client.solrj.ResponseParser;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.junit.Test;
@@ -37,6 +38,7 @@ public class LBHttpSolrClientTest {
    * Validate that the parser passed in is used in the <code>HttpSolrClient</code> instances created.
    */
   @Test
+  @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testLBHttpSolrClientHttpClientResponseParserStringArray() throws IOException {
     CloseableHttpClient httpClient = HttpClientUtil.createClient(new ModifiableSolrParams());
     try (

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
index 137add6..50b8f85 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
@@ -3113,6 +3113,7 @@ public class MathExpressionTest extends SolrCloudTestCase {
 
   @Test
   // 12-Jun-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") //2018-03-10
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testMultiVariateNormalDistribution() throws Exception {
     String cexpr = "let(echo=true," +
         "     a=array(1,2,3,4,5,6,7)," +

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExpessionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExpessionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExpessionTest.java
index 9327ee9..e43176a 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExpessionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExpessionTest.java
@@ -107,6 +107,7 @@ public class StreamExpressionToExpessionTest extends LuceneTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testDaemonStream() throws Exception {
     String expressionString;
 
@@ -219,6 +220,7 @@ public class StreamExpressionToExpessionTest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testUpdateStream() throws Exception {
     StreamExpression expression = StreamExpressionParser.parse("update("
                                                                + "collection2, "

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExplanationTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExplanationTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExplanationTest.java
index c1c5369..d1ceebc 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExplanationTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExplanationTest.java
@@ -88,6 +88,7 @@ public class StreamExpressionToExplanationTest extends LuceneTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testDaemonStream() throws Exception {
     // Basic test
     try (DaemonStream stream = new DaemonStream(StreamExpressionParser.parse("daemon(search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), id=\"blah\", runInterval=\"1000\", queueSize=\"100\")"), factory)) {
@@ -174,6 +175,7 @@ public class StreamExpressionToExplanationTest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testUpdateStream() throws Exception {
     StreamExpression expression = StreamExpressionParser.parse("update("
                                                                + "collection2, "

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCollectionAdminRequest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCollectionAdminRequest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCollectionAdminRequest.java
index 29b273e..803898e 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCollectionAdminRequest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCollectionAdminRequest.java
@@ -29,6 +29,7 @@ import org.junit.Test;
 public class TestCollectionAdminRequest extends LuceneTestCase {
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testInvalidCollectionNameRejectedWhenCreatingCollection() {
     final SolrException e = expectThrows(SolrException.class, () -> {
         CollectionAdminRequest.createCollection("invalid$collection@name", null, 1, 1);
@@ -40,6 +41,7 @@ public class TestCollectionAdminRequest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testInvalidShardNamesRejectedWhenCreatingImplicitCollection() {
     final SolrException e = expectThrows(SolrException.class, () -> {
         CollectionAdminRequest.createCollectionWithImplicitRouter("fine", "fine", "invalid$shard@name",1,0,0);
@@ -51,6 +53,7 @@ public class TestCollectionAdminRequest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testInvalidShardNamesRejectedWhenCallingSetShards() {
     CollectionAdminRequest.Create request = CollectionAdminRequest.createCollectionWithImplicitRouter("fine",null,"fine",1);
     final SolrException e = expectThrows(SolrException.class, () -> {
@@ -63,6 +66,7 @@ public class TestCollectionAdminRequest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testInvalidAliasNameRejectedWhenCreatingAlias() {
     final SolrException e = expectThrows(SolrException.class, () -> {
         CreateAlias createAliasRequest = CollectionAdminRequest.createAlias("invalid$alias@name","ignored");
@@ -74,6 +78,7 @@ public class TestCollectionAdminRequest extends LuceneTestCase {
   }
   
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testInvalidShardNameRejectedWhenCreatingShard() {
     final SolrException e = expectThrows(SolrException.class, () -> {
         CreateShard createShardRequest = CollectionAdminRequest.createShard("ignored","invalid$shard@name");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java
index df7beea..2db72ec 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java
@@ -44,6 +44,7 @@ import org.junit.Test;
 public class TestUpdateRequestCodec extends LuceneTestCase {
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void simple() throws IOException {
     UpdateRequest updateRequest = new UpdateRequest();
     updateRequest.deleteById("*:*");
@@ -110,6 +111,7 @@ public class TestUpdateRequestCodec extends LuceneTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testIteratable() throws IOException {
     final List<String> values = new ArrayList<>();
     values.add("iterItem1");
@@ -159,7 +161,8 @@ public class TestUpdateRequestCodec extends LuceneTestCase {
   }
 
 
-
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testBackCompat4_5() throws IOException {
 
     UpdateRequest updateRequest = new UpdateRequest();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java
index 21806da..b5ff3f0 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java
@@ -26,9 +26,12 @@ import org.apache.solr.client.solrj.impl.BinaryRequestWriter;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
 import org.apache.solr.common.util.ContentStreamBase;
 import org.apache.solr.common.util.Utils;
+import org.junit.Test;
 
 public class TestV1toV2ApiMapper extends LuceneTestCase {
 
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testCreate() throws IOException {
     Create cmd = CollectionAdminRequest
         .createCollection("mycoll", "conf1", 3, 2)
@@ -44,7 +47,9 @@ public class TestV1toV2ApiMapper extends LuceneTestCase {
     assertEquals("3", Utils.getObjectByPath(m,true,"/create/numShards"));
     assertEquals("2", Utils.getObjectByPath(m,true,"/create/nrtReplicas"));
   }
-  
+
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testAddReplica() throws IOException {
     CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard("mycoll", "shard1");
     V2Request v2r = V1toV2ApiMapper.convert(addReplica).build();
@@ -53,7 +58,9 @@ public class TestV1toV2ApiMapper extends LuceneTestCase {
     assertEquals("shard1", Utils.getObjectByPath(m,true,"/add-replica/shard"));
     assertEquals("NRT", Utils.getObjectByPath(m,true,"/add-replica/type"));
   }
-  
+
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testSetCollectionProperty() throws IOException {
     CollectionAdminRequest.CollectionProp collectionProp = CollectionAdminRequest.setCollectionProperty("mycoll", "prop", "value");
     V2Request v2r = V1toV2ApiMapper.convert(collectionProp).build();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/response/QueryResponseTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/response/QueryResponseTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/response/QueryResponseTest.java
index a567116..69b2b9f 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/response/QueryResponseTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/response/QueryResponseTest.java
@@ -41,6 +41,7 @@ import org.junit.Test;
 @Limit(bytes=20000)
 public class QueryResponseTest extends LuceneTestCase {
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testRangeFacets() throws Exception {
     XMLResponseParser parser = new XMLResponseParser();
     NamedList<Object> response = null;
@@ -102,6 +103,7 @@ public class QueryResponseTest extends LuceneTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testGroupResponse() throws Exception {
     XMLResponseParser parser = new XMLResponseParser();
     NamedList<Object> response = null;
@@ -207,6 +209,7 @@ public class QueryResponseTest extends LuceneTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testSimpleGroupResponse() throws Exception {
     XMLResponseParser parser = new XMLResponseParser();
     NamedList<Object> response = null;
@@ -251,7 +254,8 @@ public class QueryResponseTest extends LuceneTestCase {
     assertEquals("708_HI", documents.get(9).getFieldValue("acco_id"));
   }
   
-  
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testIntervalFacetsResponse() throws Exception {
     XMLResponseParser parser = new XMLResponseParser();
     try(SolrResourceLoader loader = new SolrResourceLoader()) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/client/solrj/response/TestDelegationTokenResponse.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/response/TestDelegationTokenResponse.java b/solr/solrj/src/test/org/apache/solr/client/solrj/response/TestDelegationTokenResponse.java
index 6d4fa28..2f83c3d 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/response/TestDelegationTokenResponse.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/response/TestDelegationTokenResponse.java
@@ -62,6 +62,7 @@ public class TestDelegationTokenResponse extends LuceneTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testGetResponse() throws Exception {
     DelegationTokenRequest.Get getRequest = new DelegationTokenRequest.Get();
     DelegationTokenResponse.Get getResponse = new DelegationTokenResponse.Get();
@@ -97,6 +98,7 @@ public class TestDelegationTokenResponse extends LuceneTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testRenewResponse() throws Exception {
     DelegationTokenRequest.Renew renewRequest = new DelegationTokenRequest.Renew("token");
     DelegationTokenResponse.Renew renewResponse = new DelegationTokenResponse.Renew();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/common/TestToleratedUpdateError.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/common/TestToleratedUpdateError.java b/solr/solrj/src/test/org/apache/solr/common/TestToleratedUpdateError.java
index aba07ae..36bac28 100644
--- a/solr/solrj/src/test/org/apache/solr/common/TestToleratedUpdateError.java
+++ b/solr/solrj/src/test/org/apache/solr/common/TestToleratedUpdateError.java
@@ -23,6 +23,7 @@ import org.apache.solr.common.util.SimpleOrderedMap;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
+import org.junit.Test;
 
 /** Basic testing of the serialization/encapsulation code in ToleratedUpdateError */
 public class TestToleratedUpdateError extends LuceneTestCase {
@@ -48,7 +49,9 @@ public class TestToleratedUpdateError extends LuceneTestCase {
     String badKey = valid.getMetadataKey().replace(":", "X");
     assertNull(ToleratedUpdateError.parseMetadataIfToleratedUpdateError(badKey, valid.getMetadataValue()));
   }
-  
+
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testParseMapErrorChecking() {
     SimpleOrderedMap<String> bogus = new SimpleOrderedMap<String>();
     try {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java b/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java
index 18b3a1b..5694972 100644
--- a/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java
+++ b/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java
@@ -18,6 +18,7 @@ package org.apache.solr.common.params;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.common.SolrException;
+import org.junit.Test;
 
 /**
  * This class tests backwards compatibility of {@link ShardParams} parameter constants.
@@ -49,7 +50,9 @@ public class ShardParamsTest extends LuceneTestCase
   public void testRoute() { assertEquals(ShardParams._ROUTE_, "_route_"); }
   
   public void testDistribSinglePass() { assertEquals(ShardParams.DISTRIB_SINGLE_PASS, "distrib.singlePass"); }
-  
+
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testGetShardsTolerantAsBool() {
     ModifiableSolrParams params = new ModifiableSolrParams();
     // shards.tolerant param is not set; default should be false

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/common/util/NamedListTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/common/util/NamedListTest.java b/solr/solrj/src/test/org/apache/solr/common/util/NamedListTest.java
index 8605cd9..8a87f48 100644
--- a/solr/solrj/src/test/org/apache/solr/common/util/NamedListTest.java
+++ b/solr/solrj/src/test/org/apache/solr/common/util/NamedListTest.java
@@ -22,6 +22,7 @@ import java.util.Map;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.common.SolrException;
+import org.junit.Test;
 
 public class NamedListTest extends LuceneTestCase {
   public void testRemove() {
@@ -62,7 +63,9 @@ public class NamedListTest extends LuceneTestCase {
     assertEquals(5, values.size());
     assertEquals(0, nl.size());
   }
-  
+
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testRemoveArgs() {
     NamedList<Object> nl = new NamedList<>();
     nl.add("key1", "value1-1");
@@ -187,6 +190,8 @@ public class NamedListTest extends LuceneTestCase {
     assertNull(enltest4);
   }
 
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testShallowMap() {
     NamedList nl = new NamedList();
     nl.add("key1", "Val1");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/common/util/TestFastInputStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/common/util/TestFastInputStream.java b/solr/solrj/src/test/org/apache/solr/common/util/TestFastInputStream.java
index a30d7ee..863de18 100644
--- a/solr/solrj/src/test/org/apache/solr/common/util/TestFastInputStream.java
+++ b/solr/solrj/src/test/org/apache/solr/common/util/TestFastInputStream.java
@@ -31,6 +31,7 @@ import java.util.zip.GZIPOutputStream;
  */
 public class TestFastInputStream extends LuceneTestCase {
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testgzip() throws Exception {
     ByteArrayOutputStream b = new ByteArrayOutputStream();
     FastOutputStream fos = new FastOutputStream(b);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dd088fb8/solr/solrj/src/test/org/apache/solr/common/util/TestNamedListCodec.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/common/util/TestNamedListCodec.java b/solr/solrj/src/test/org/apache/solr/common/util/TestNamedListCodec.java
index 77ba622..ce01c09 100644
--- a/solr/solrj/src/test/org/apache/solr/common/util/TestNamedListCodec.java
+++ b/solr/solrj/src/test/org/apache/solr/common/util/TestNamedListCodec.java
@@ -20,6 +20,7 @@ import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.BaseDistributedSearchTestCase;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
+import org.junit.Test;
 
 import java.io.ByteArrayOutputStream;
 import java.io.ByteArrayInputStream;
@@ -30,6 +31,8 @@ import java.util.Map;
 import java.util.HashMap;
 
 public class TestNamedListCodec  extends LuceneTestCase {
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testSimple() throws Exception{
 
     NamedList nl = new NamedList();
@@ -92,6 +95,8 @@ public class TestNamedListCodec  extends LuceneTestCase {
     assertEquals(101, ((List)list.get(1).getFieldValue("f")).get(1));
   }
 
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testIterator() throws Exception{
     
     NamedList nl = new NamedList();
@@ -131,6 +136,8 @@ public class TestNamedListCodec  extends LuceneTestCase {
     assertEquals(list.size(), l.size());
   }
 
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testIterable() throws Exception {
     
 
@@ -245,8 +252,8 @@ public class TestNamedListCodec  extends LuceneTestCase {
     }
   }
 
-
-
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testRandom() throws Exception {
     // Random r = random;
     // let's keep it deterministic since just the wrong


[22/29] lucene-solr:jira/http2: LUCENE-8493: Stop publishing insecure .sha1 files with releases

Posted by da...@apache.org.
LUCENE-8493: Stop publishing insecure .sha1 files with releases


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/03c9c043
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/03c9c043
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/03c9c043

Branch: refs/heads/jira/http2
Commit: 03c9c04353ce1b5ace33fddd5bd99059e63ed507
Parents: 667b829
Author: Jan Høydahl <ja...@apache.org>
Authored: Wed Sep 26 15:31:26 2018 +0200
Committer: Jan Høydahl <ja...@apache.org>
Committed: Wed Sep 26 15:31:26 2018 +0200

----------------------------------------------------------------------
 dev-tools/scripts/prep-solr-ref-guide-rc.sh    |  4 +---
 dev-tools/scripts/smokeTestRelease.py          | 13 ++-----------
 lucene/CHANGES.txt                             |  2 ++
 lucene/common-build.xml                        |  1 -
 solr/solr-ref-guide/src/meta-docs/publish.adoc |  7 +++----
 5 files changed, 8 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03c9c043/dev-tools/scripts/prep-solr-ref-guide-rc.sh
----------------------------------------------------------------------
diff --git a/dev-tools/scripts/prep-solr-ref-guide-rc.sh b/dev-tools/scripts/prep-solr-ref-guide-rc.sh
index ff9985d..a7bcc3b 100755
--- a/dev-tools/scripts/prep-solr-ref-guide-rc.sh
+++ b/dev-tools/scripts/prep-solr-ref-guide-rc.sh
@@ -20,7 +20,7 @@
 # Prepares an RC of the Solr Ref Guide by doing local file operations to:
 #  - create a directory for the RC files
 #  - move the PDF files into the RC directory with the appropriate name
-#  - generate SHA1 and SHA512 of the PDF file
+#  - generate SHA512 of the PDF file
 #  - GPG sign the PDF files
 #
 # See: https://cwiki.apache.org/confluence/display/solr/Internal+-+How+To+Publish+This+Documentation
@@ -63,7 +63,6 @@ fi
 PREFIX="apache-solr-ref-guide"
 DIR="$PREFIX-$VER_RC"
 PDF="$PREFIX-$VER.pdf"
-SHA="$PDF.sha1"
 SHA512="$PDF.sha512"
 GPG="$PDF.asc"
 
@@ -85,7 +84,6 @@ set -x
 mkdir $DIR || exit 1
 mv $SRC_FILE $DIR/$PDF || exit 1
 cd $DIR || exit 1
-shasum $PDF > $SHA || exit 1
 shasum -a 512 $PDF > $SHA512 || exit 1
 gpg $GPG_ID_ARG --armor --output $GPG --detach-sig $PDF|| exit 1
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03c9c043/dev-tools/scripts/smokeTestRelease.py
----------------------------------------------------------------------
diff --git a/dev-tools/scripts/smokeTestRelease.py b/dev-tools/scripts/smokeTestRelease.py
index 70bea24..82f3d27 100644
--- a/dev-tools/scripts/smokeTestRelease.py
+++ b/dev-tools/scripts/smokeTestRelease.py
@@ -295,7 +295,7 @@ def checkSigs(project, urlString, version, tmpDir, isSigned, keysFile):
   expectedSigs = []
   if isSigned:
     expectedSigs.append('asc')
-  expectedSigs.extend(['sha1', 'sha512'])
+  expectedSigs.extend(['sha512'])
 
   artifacts = []
   for text, subURL in ents:
@@ -538,29 +538,20 @@ def run(command, logFile):
     raise RuntimeError('command "%s" failed; see log file %s' % (command, logPath))
     
 def verifyDigests(artifact, urlString, tmpDir):
-  print('    verify sha1/sha512 digests')
-  sha1Expected, t = load(urlString + '.sha1').strip().split()
-  if t != '*'+artifact:
-    raise RuntimeError('SHA1 %s.sha1 lists artifact %s but expected *%s' % (urlString, t, artifact))
-
+  print('    verify sha512 digest')
   sha512Expected, t = load(urlString + '.sha512').strip().split()
   if t != '*'+artifact:
     raise RuntimeError('SHA512 %s.sha512 lists artifact %s but expected *%s' % (urlString, t, artifact))
   
-  s = hashlib.sha1()
   s512 = hashlib.sha512()
   f = open('%s/%s' % (tmpDir, artifact), 'rb')
   while True:
     x = f.read(65536)
     if len(x) == 0:
       break
-    s.update(x)
     s512.update(x)
   f.close()
-  sha1Actual = s.hexdigest()
   sha512Actual = s512.hexdigest()
-  if sha1Actual != sha1Expected:
-    raise RuntimeError('SHA1 digest mismatch for %s: expected %s but got %s' % (artifact, sha1Expected, sha1Actual))
   if sha512Actual != sha512Expected:
     raise RuntimeError('SHA512 digest mismatch for %s: expected %s but got %s' % (artifact, sha512Expected, sha512Actual))
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03c9c043/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 60afb58..3b10c16 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -182,6 +182,8 @@ Build
 
 * LUCENE-8504: Upgrade forbiddenapis to version 2.6.  (Uwe Schindler)
 
+* LUCENE-8493: Stop publishing insecure .sha1 files with releases (janhoy)
+
 ======================= Lucene 7.5.1 =======================
 
 Bug Fixes:

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03c9c043/lucene/common-build.xml
----------------------------------------------------------------------
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index 0d38822..1a6839b 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -2296,7 +2296,6 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
     <attribute name="file"/>
     <sequential>
       <echo>Building checksums for '@{file}'</echo>
-      <checksum file="@{file}" algorithm="sha1" fileext=".sha1" format="MD5SUM" forceoverwrite="yes" readbuffersize="65536"/>
       <checksum file="@{file}" algorithm="SHA-512" fileext=".sha512" format="MD5SUM" forceoverwrite="yes" readbuffersize="65536"/>
     </sequential>
   </macrodef>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03c9c043/solr/solr-ref-guide/src/meta-docs/publish.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/meta-docs/publish.adoc b/solr/solr-ref-guide/src/meta-docs/publish.adoc
index 78ef290..70112ba 100644
--- a/solr/solr-ref-guide/src/meta-docs/publish.adoc
+++ b/solr/solr-ref-guide/src/meta-docs/publish.adoc
@@ -71,7 +71,7 @@ These steps walk through checking out this directory and uploading the Guide to
 IMPORTANT: The next step requires that you have already generated your GPG keys. Your GPG passphrase will be required.
 
 [start=3]
-. Run the Prep Ref Guide script to prepare the RC. This script ensures proper naming of the PDF file, generates `.sha1`,
+. Run the Prep Ref Guide script to prepare the RC. This script ensures proper naming of the PDF file, generates 
  `.sha512` and `.asc` files and creates the proper RC sub-directories under `solr-ref-guide-rc`.
 .. The structure of the input is: `prep-solr-ref-guide-rc.sh <path/PDFfilename> <Solrversion-RC#> GPGkey`.
 .. From the `solr-ref-guide-rc` directory, it will look something like this:
@@ -83,7 +83,6 @@ $ ~/lucene-source/dev-tools/scripts/prep-solr-ref-guide-rc.sh apache-solr-ref-gu
 + mkdir apache-solr-ref-guide-7.0-RC0
 + mv apache-solr-ref-guide-7.0.pdf apache-solr-ref-guide-7.0-RC0/apache-solr-ref-guide-7.0.pdf
 + cd apache-solr-ref-guide-7.0-RC0
-+ shasum apache-solr-ref-guide-7.0.pdf
 + shasum -a 512 apache-solr-ref-guide-7.0.pdf
 + gpg -u DEADBEEF --armor --output apache-solr-ref-guide-7.0.pdf.asc --detach-sig apache-solr-ref-guide-7.0.pdf
 
@@ -157,7 +156,7 @@ Once at least three PMC members have voted for release (see https://www.apache.o
 $ ~/lucene-source/dev-tools/scripts/publish-solr-ref-guide-rc.sh X.Y-RCZ
 
 ## Run the following commands when ready...
-svn move -m 'publishing apache-solr-ref-guide-X.Y-RCZ' https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-X.Y-RCZ/apache-solr-ref-guide-X.Y.pdf https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-X.Y-RCZ/apache-solr-ref-guide-X.Y.pdf.asc https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-X.Y-RCZ/apache-solr-ref-guide-X.Y.pdf.sha1 https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-X.Y-RCZ/apache-solr-ref-guide-X.Y.pdf.sha512 https://dist.apache.org/repos/dist/release/lucene/solr/ref-guide/
+svn move -m 'publishing apache-solr-ref-guide-X.Y-RCZ' https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-X.Y-RCZ/apache-solr-ref-guide-X.Y.pdf https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-X.Y-RCZ/apache-solr-ref-guide-X.Y.pdf.asc https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-X.Y-RCZ/apache-solr-ref-guide-X.Y.pdf.sha512 https://dist.apache.org/repos/dist/release/lucene/solr/ref-guide/
 
 svn rm -m 'cleaning up apache-solr-ref-guide-X.Y-RCZ' https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-X.Y-RCZ
 ----
@@ -182,7 +181,7 @@ $ ~/lucene-source/dev-tools/scripts/archive-solr-ref-guide.sh X.Y
 ## Run the following commands when ready...
 
 # Delete old releases
-svn rm -m 'removing archived ref guide files prior to X.Y' https://dist.apache.org/repos/dist/release/lucene/solr/ref-guide/apache-solr-ref-guide-A.B.pdf https://dist.apache.org/repos/dist/release/lucene/solr/ref-guide/apache-solr-ref-guide-A.B.pdf.asc https://dist.apache.org/repos/dist/release/lucene/solr/ref-guide/apache-solr-ref-guide-A.B.pdf.sha1
+svn rm -m 'removing archived ref guide files prior to X.Y' https://dist.apache.org/repos/dist/release/lucene/solr/ref-guide/apache-solr-ref-guide-A.B.pdf https://dist.apache.org/repos/dist/release/lucene/solr/ref-guide/apache-solr-ref-guide-A.B.pdf.asc https://dist.apache.org/repos/dist/release/lucene/solr/ref-guide/apache-solr-ref-guide-A.B.pdf.sha512
 
 # Delete old RC files
 svn rm -m 'cleaning up old RCs now that X.Y has been released' https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-X.Y-RC0/ https://dist.apache.org/repos/dist/dev/lucene/solr/ref-guide/apache-solr-ref-guide-X.Y-RC1/


[21/29] lucene-solr:jira/http2: SOLR-12805: Store previous term (generation) of replica when start recovery process

Posted by da...@apache.org.
SOLR-12805: Store previous term (generation) of replica when start recovery process


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/667b8299
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/667b8299
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/667b8299

Branch: refs/heads/jira/http2
Commit: 667b8299e69755abfef89b3beb44cacdd292d479
Parents: 5816766
Author: Cao Manh Dat <da...@apache.org>
Authored: Wed Sep 26 10:35:44 2018 +0700
Committer: Cao Manh Dat <da...@apache.org>
Committed: Wed Sep 26 10:35:44 2018 +0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                      |  2 ++
 .../src/java/org/apache/solr/cloud/ZkShardTerms.java  |  8 ++++++--
 .../test/org/apache/solr/cloud/ZkShardTermsTest.java  | 14 +++++++++-----
 3 files changed, 17 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/667b8299/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 6128986..98fb204 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -71,6 +71,8 @@ Other Changes
   java.time.DateTimeFormatter instead of Joda time (see upgrade notes).  "Lenient" is enabled.  Removed Joda Time dependency.
   (David Smiley, Bar Rotstein)
 
+* SOLR-12805: Store previous term (generation) of replica when start recovery process (Cao Manh Dat)
+
 ==================  7.6.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/667b8299/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java b/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java
index 99df2a2..bcbb347 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java
@@ -588,12 +588,16 @@ public class ZkShardTerms implements AutoCloseable{
      */
     Terms startRecovering(String coreNodeName) {
       long maxTerm = getMaxTerm();
-      if (values.get(coreNodeName) == maxTerm && values.getOrDefault(coreNodeName+"_recovering", -1L) == maxTerm)
+      if (values.get(coreNodeName) == maxTerm)
         return null;
 
       HashMap<String, Long> newValues = new HashMap<>(values);
+      if (!newValues.containsKey(coreNodeName+"_recovering")) {
+        long currentTerm = newValues.getOrDefault(coreNodeName, 0L);
+        // by keeping old term, we will have more information in leader election
+        newValues.put(coreNodeName+"_recovering", currentTerm);
+      }
       newValues.put(coreNodeName, maxTerm);
-      newValues.put(coreNodeName+"_recovering", maxTerm);
       return new Terms(newValues, version);
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/667b8299/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java
index 201de26..1c1b1d2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java
@@ -83,16 +83,19 @@ public class ZkShardTermsTest extends SolrCloudTestCase {
       // List all possible orders of ensureTermIsHigher, startRecovering, doneRecovering
       zkShardTerms.registerTerm("replica1");
       zkShardTerms.registerTerm("replica2");
+
+      // normal case when leader start lir process
       zkShardTerms.ensureTermsIsHigher("replica1", Collections.singleton("replica2"));
       zkShardTerms.startRecovering("replica2");
       assertEquals(zkShardTerms.getTerm("replica2"), 1);
-      assertEquals(zkShardTerms.getTerm("replica2_recovering"), 1);
+      assertEquals(zkShardTerms.getTerm("replica2_recovering"), 0);
 
       zkShardTerms.doneRecovering("replica2");
       assertEquals(zkShardTerms.getTerm("replica1"), 1);
       assertEquals(zkShardTerms.getTerm("replica2"), 1);
       assertEquals(zkShardTerms.getTerm("replica2_recovering"), -1);
 
+      // stack of lir processes
       zkShardTerms.ensureTermsIsHigher("replica1", Collections.singleton("replica2"));
       assertEquals(zkShardTerms.getTerm("replica1"), 2);
       assertEquals(zkShardTerms.getTerm("replica2"), 1);
@@ -100,16 +103,17 @@ public class ZkShardTermsTest extends SolrCloudTestCase {
 
       zkShardTerms.startRecovering("replica2");
       assertEquals(zkShardTerms.getTerm("replica2"), 2);
-      assertEquals(zkShardTerms.getTerm("replica2_recovering"), 2);
+      assertEquals(zkShardTerms.getTerm("replica2_recovering"), 1);
 
       zkShardTerms.ensureTermsIsHigher("replica1", Collections.singleton("replica2"));
       assertEquals(zkShardTerms.getTerm("replica1"), 3);
       assertEquals(zkShardTerms.getTerm("replica2"), 2);
-      assertEquals(zkShardTerms.getTerm("replica2_recovering"), 2);
-      zkShardTerms.doneRecovering("replica2");
+      assertEquals(zkShardTerms.getTerm("replica2_recovering"), 1);
 
+      zkShardTerms.doneRecovering("replica2");
       assertEquals(zkShardTerms.getTerm("replica2"), 2);
       assertEquals(zkShardTerms.getTerm("replica2_recovering"), -1);
+
       zkShardTerms.startRecovering("replica2");
       zkShardTerms.doneRecovering("replica2");
 
@@ -119,7 +123,7 @@ public class ZkShardTermsTest extends SolrCloudTestCase {
       zkShardTerms.startRecovering("replica2");
       assertEquals(zkShardTerms.getTerm("replica1"), 5);
       assertEquals(zkShardTerms.getTerm("replica2"), 5);
-      assertEquals(zkShardTerms.getTerm("replica2_recovering"), 5);
+      assertEquals(zkShardTerms.getTerm("replica2_recovering"), 3);
       zkShardTerms.doneRecovering("replica2");
       assertEquals(zkShardTerms.getTerm("replica2_recovering"), -1);
 


[13/29] lucene-solr:jira/http2: SOLR-11522: /autoscaling/suggestions now include rebalance options as well even if there are no violations

Posted by da...@apache.org.
SOLR-11522: /autoscaling/suggestions now include rebalance options as well even if there are no violations


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/3f2975c2
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/3f2975c2
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/3f2975c2

Branch: refs/heads/jira/http2
Commit: 3f2975c27c520ed7543779cd731a7cd525243479
Parents: baf40d5
Author: noble <no...@apache.org>
Authored: Tue Sep 25 00:34:21 2018 +1000
Committer: noble <no...@apache.org>
Committed: Tue Sep 25 00:34:21 2018 +1000

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   2 +
 .../cloud/autoscaling/FreeDiskVariable.java     |  12 +-
 .../solrj/cloud/autoscaling/PolicyHelper.java   |  87 +++++++++----
 .../client/solrj/cloud/autoscaling/Row.java     |   2 +-
 .../solrj/cloud/autoscaling/Suggester.java      |   2 +-
 .../solrj/cloud/autoscaling/Suggestion.java     |   5 +
 .../solr/common/ConditionalMapWriter.java       |   2 +-
 .../java/org/apache/solr/common/MapWriter.java  |   5 +
 .../java/org/apache/solr/common/util/Utils.java |  31 ++++-
 .../autoscaling/testSuggestionsRebalance2.json  | 130 +++++++++++++++++++
 .../testSuggestionsRebalanceOnly.json           | 105 +++++++++++++++
 .../solrj/cloud/autoscaling/TestPolicy.java     |  98 +++++++-------
 .../solrj/cloud/autoscaling/TestPolicy2.java    |  39 ++++++
 13 files changed, 431 insertions(+), 89 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 2c4903b..6128986 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -95,6 +95,8 @@ New Features
   As part of this change, the CREATESHARD API now delegates placing replicas entirely to the ADDREPLICA command
   and uses the new parameters to add all the replicas in one API call. (shalin)
 
+* SOLR-11522: /autoscaling/suggestions now include rebalance options as well even if there are no violations (noble)
+
 Other Changes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java
index 600695a..778a837 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java
@@ -18,9 +18,9 @@
 package org.apache.solr.client.solrj.cloud.autoscaling;
 
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Comparator;
 import java.util.List;
-import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
@@ -104,19 +104,17 @@ public class FreeDiskVariable extends VariableBase {
         }
       }
     } else if (ctx.violation.replicaCountDelta < 0) {
-      suggestNegativeViolations(ctx, shards -> getSortedShards(ctx,shards));
+      suggestNegativeViolations(ctx, shards -> getSortedShards(ctx.session.matrix, shards, ctx.violation.coll));
     }
   }
 
 
-
-
-  private List<String> getSortedShards(Suggestion.Ctx ctx, Set<String> shardSet) {
+  static List<String> getSortedShards(List<Row> matrix, Collection<String> shardSet, String coll) {
     return  shardSet.stream()
         .map(shard1 -> {
           AtomicReference<Pair<String, Long>> result = new AtomicReference<>();
-          for (Row node : ctx.session.matrix) {
-            node.forEachShard(ctx.violation.coll, (s, ri) -> {
+          for (Row node : matrix) {
+            node.forEachShard(coll, (s, ri) -> {
               if (result.get() != null) return;
               if (s.equals(shard1) && ri.size() > 0) {
                 Number sz = ((Number) ri.get(0).getVariable(CORE_IDX.tagName));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
index d052d6f..29ccc65 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.io.StringWriter;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumMap;
 import java.util.HashMap;
@@ -57,6 +58,7 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.FREEDISK;
 import static org.apache.solr.common.ConditionalMapWriter.dedupeKeyPredicate;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
 import static org.apache.solr.common.params.CoreAdminParams.NODE;
 import static org.apache.solr.common.util.Utils.handleExp;
 import static org.apache.solr.common.util.Utils.time;
@@ -68,7 +70,7 @@ public class PolicyHelper {
   private static final String POLICY_MAPPING_KEY = "PolicyHelper.policyMapping";
 
   private static ThreadLocal<Map<String, String>> getPolicyMapping(SolrCloudManager cloudManager) {
-    return (ThreadLocal<Map<String, String>>)cloudManager.getObjectCache()
+    return (ThreadLocal<Map<String, String>>) cloudManager.getObjectCache()
         .computeIfAbsent(POLICY_MAPPING_KEY, k -> new ThreadLocal<>());
   }
 
@@ -222,43 +224,82 @@ public class PolicyHelper {
         .put("config", session.getPolicy());
   }
 
-  public static List<Suggester.SuggestionInfo> getSuggestions(AutoScalingConfig autoScalingConf, SolrCloudManager cloudManager) {
+  public static List<Suggester.SuggestionInfo> getSuggestions(AutoScalingConfig autoScalingConf,
+                                                              SolrCloudManager cloudManager) {
+    return getSuggestions(autoScalingConf, cloudManager, 20);
+  }
+
+  public static List<Suggester.SuggestionInfo> getSuggestions(AutoScalingConfig autoScalingConf,
+                                                              SolrCloudManager cloudManager, int max) {
     Policy policy = autoScalingConf.getPolicy();
-    Suggestion.Ctx suggestionCtx = new Suggestion.Ctx();
-    suggestionCtx.session = policy.createSession(cloudManager);
-    List<Violation> violations = suggestionCtx.session.getViolations();
+    Suggestion.Ctx ctx = new Suggestion.Ctx();
+    ctx.max = max;
+    ctx.session = policy.createSession(cloudManager);
+    List<Violation> violations = ctx.session.getViolations();
     for (Violation violation : violations) {
       String name = violation.getClause().isPerCollectiontag() ?
           violation.getClause().tag.name :
           violation.getClause().globalTag.name;
       Variable.Type tagType = VariableBase.getTagType(name);
-      tagType.getSuggestions(suggestionCtx.setViolation(violation));
-      suggestionCtx.violation = null;
+      tagType.getSuggestions(ctx.setViolation(violation));
+      ctx.violation = null;
+    }
+    if (ctx.getSuggestions().size() < max) {
+      suggestOptimizations(ctx);
+    }
+    return ctx.getSuggestions();
+  }
+
+  private static void suggestOptimizations(Suggestion.Ctx ctx) {
+    List<Row> matrix = ctx.session.matrix;
+    if (matrix.isEmpty()) return;
+    for (int i = 0; i < matrix.size(); i++) {
+      Row row = matrix.get(i);
+      Map<String, Collection<String>> collVsShards = new HashMap<>();
+      row.forEachReplica(ri -> collVsShards.computeIfAbsent(ri.getCollection(), s -> new HashSet<>()).add(ri.getShard()));
+      for (Map.Entry<String, Collection<String>> e : collVsShards.entrySet()) {
+        e.setValue(FreeDiskVariable.getSortedShards(Collections.singletonList(row), e.getValue(), e.getKey()));
+      }
+      for (Map.Entry<String, Collection<String>> e : collVsShards.entrySet()) {
+        if (!ctx.needMore()) break;
+        for (String shard : e.getValue()) {
+          if (!ctx.needMore()) break;
+          Suggester suggester = ctx.session.getSuggester(MOVEREPLICA)
+              .hint(Hint.COLL_SHARD, new Pair<>(e.getKey(), shard))
+              .hint(Hint.SRC_NODE, row.node);
+          ctx.addSuggestion(suggester);
+        }
+      }
     }
-    return suggestionCtx.getSuggestions();
   }
 
 
-  /**Use this to dump the state of a system and to generate a testcase
+  /**
+   * Use this to dump the state of a system and to generate a testcase
    */
   public static void logState(SolrCloudManager cloudManager, Suggester suggester) {
     if (log.isTraceEnabled()) {
-      log.trace("LOGSTATE: {}",
-          Utils.toJSONString((MapWriter) ew -> {
-            ew.put("liveNodes", cloudManager.getClusterStateProvider().getLiveNodes());
-            ew.put("suggester", suggester);
-            if (suggester.session.nodeStateProvider instanceof MapWriter) {
-              MapWriter nodeStateProvider = (MapWriter) suggester.session.nodeStateProvider;
-              nodeStateProvider.writeMap(ew);
-            }
-            try {
-              ew.put("autoscalingJson", cloudManager.getDistribStateManager().getAutoScalingConfig());
-            } catch (InterruptedException e) {
-            }
-          }));
+      try {
+        log.trace("LOGSTATE: {}",
+            Utils.writeJson(loggingInfo(cloudManager.getDistribStateManager().getAutoScalingConfig().getPolicy(), cloudManager, suggester),
+                new StringWriter(), true).toString());
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
     }
   }
 
+
+  static MapWriter loggingInfo(Policy policy, SolrCloudManager cloudManager, Suggester suggester) {
+    return ew -> {
+      ew.put("diagnostics", getDiagnostics(policy,
+          cloudManager));
+      if (suggester != null) {
+        ew.put("suggester", suggester);
+      }
+    };
+  }
+
   public enum Status {
     NULL,
     //it is just created and not yet used or all operations on it has been completed fully
@@ -292,7 +333,6 @@ public class PolicyHelper {
     /**
      * All operations suggested by the current session object
      * is complete. Do not even cache anything
-     *
      */
     private void release(SessionWrapper sessionWrapper) {
       synchronized (lockObj) {
@@ -306,7 +346,6 @@ public class PolicyHelper {
     /**
      * Computing is over for this session and it may contain a new session with new state
      * The session can be used by others while the caller is performing operations
-     *
      */
     private void returnSession(SessionWrapper sessionWrapper) {
       TimeSource timeSource = sessionWrapper.session != null ? sessionWrapper.session.cloudManager.getTimeSource() : TimeSource.NANO_TIME;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Row.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Row.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Row.java
index 85d6f30..dcd5fba 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Row.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Row.java
@@ -154,7 +154,7 @@ public class Row implements MapWriter {
     if (row == null) throw new RuntimeException("couldn't get a row");
     Map<String, List<ReplicaInfo>> c = row.collectionVsShardVsReplicas.computeIfAbsent(coll, k -> new HashMap<>());
     List<ReplicaInfo> replicas = c.computeIfAbsent(shard, k -> new ArrayList<>());
-    String replicaname = "" + new Random().nextInt(1000) + 1000;
+    String replicaname = "SYNTHETIC." + new Random().nextInt(1000) + 1000;
     ReplicaInfo ri = new ReplicaInfo(replicaname, replicaname, coll, shard, type, this.node,
         Utils.makeMap(ZkStateReader.REPLICA_TYPE, type != null ? type.toString() : Replica.Type.NRT.toString()));
     replicas.add(ri);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggester.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggester.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggester.java
index db0aab4..9f42b9f 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggester.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggester.java
@@ -208,7 +208,7 @@ public abstract class Suggester implements MapWriter {
     @Override
     public void writeMap(EntryWriter ew) throws IOException {
       ew.put("type", violation == null ? "improvement" : "violation");
-      ew.putIfNotNull("violation",
+      if(violation!= null) ew.put("violation",
           new ConditionalMapWriter(violation,
               (k, v) -> !"violatingReplicas".equals(k)));
       ew.put("operation", operation);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggestion.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggestion.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggestion.java
index 1f711e5..8f120e2 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggestion.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggestion.java
@@ -32,6 +32,7 @@ import static org.apache.solr.common.params.CollectionParams.CollectionAction.MO
 
 public class Suggestion {
   static class Ctx {
+    int max = Integer.MAX_VALUE;
     public Policy.Session session;
     public Violation violation;
     private List<Suggester.SuggestionInfo> suggestions = new ArrayList<>();
@@ -55,6 +56,10 @@ public class Suggestion {
     public List<Suggester.SuggestionInfo> getSuggestions() {
       return suggestions;
     }
+
+    public boolean needMore() {
+      return suggestions.size() < max;
+    }
   }
 
   static void suggestNegativeViolations(Suggestion.Ctx ctx, Function<Set<String>, List<String>> shardSorter) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/solrj/src/java/org/apache/solr/common/ConditionalMapWriter.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/ConditionalMapWriter.java b/solr/solrj/src/java/org/apache/solr/common/ConditionalMapWriter.java
index 706414d..d351fc2 100644
--- a/solr/solrj/src/java/org/apache/solr/common/ConditionalMapWriter.java
+++ b/solr/solrj/src/java/org/apache/solr/common/ConditionalMapWriter.java
@@ -71,7 +71,7 @@ public class ConditionalMapWriter implements MapWriter {
 
   @Override
   public void writeMap(EntryWriter ew) throws IOException {
-    delegate.writeMap(new EntryWriterWrapper(ew));
+    if(delegate!=null) delegate.writeMap(new EntryWriterWrapper(ew));
   }
 
   public static BiPredicate<String, Object> dedupeKeyPredicate(Set<String> keys) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/solrj/src/java/org/apache/solr/common/MapWriter.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/MapWriter.java b/solr/solrj/src/java/org/apache/solr/common/MapWriter.java
index aa8ab38..4bdc00d 100644
--- a/solr/solrj/src/java/org/apache/solr/common/MapWriter.java
+++ b/solr/solrj/src/java/org/apache/solr/common/MapWriter.java
@@ -80,6 +80,11 @@ public interface MapWriter extends MapSerializable {
 
   void writeMap(EntryWriter ew) throws IOException;
 
+
+  default Object _get(String path, Object def) {
+    Object v = Utils.getObjectByPath(this, false, path);
+    return v == null ? def : v;
+  }
   /**
    * An interface to push one entry at a time to the output.
    * The order of the keys is not defined, but we assume they are distinct -- don't call {@code put} more than once

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/Utils.java b/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
index 9e72607..800c2c1 100644
--- a/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
@@ -70,6 +70,7 @@ import org.slf4j.LoggerFactory;
 import org.slf4j.MDC;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.util.Collections.singletonList;
 import static java.util.Collections.unmodifiableList;
 import static java.util.Collections.unmodifiableSet;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
@@ -267,6 +268,7 @@ public class Utils {
   }
 
   public static Object getObjectByPath(Object root, boolean onlyPrimitive, String hierarchy) {
+    if (hierarchy == null) return getObjectByPath(root, onlyPrimitive, singletonList(null));
     List<String> parts = StrUtils.splitSmart(hierarchy, '/');
     if (parts.get(0).isEmpty()) parts.remove(0);
     return getObjectByPath(root, onlyPrimitive, parts);
@@ -362,8 +364,12 @@ public class Utils {
         Object val = getVal(obj, s);
         if (val == null) return null;
         if (idx > -1) {
-          List l = (List) val;
-          val = idx < l.size() ? l.get(idx) : null;
+          if (val instanceof IteratorWriter) {
+            val = getValueAt((IteratorWriter) val, idx);
+          } else {
+            List l = (List) val;
+            val = idx < l.size() ? l.get(idx) : null;
+          }
         }
         if (onlyPrimitive && isMapLike(val)) {
           return null;
@@ -375,6 +381,27 @@ public class Utils {
     return false;
   }
 
+  private static Object getValueAt(IteratorWriter iteratorWriter, int idx) {
+    Object[] result = new Object[1];
+    try {
+      iteratorWriter.writeIter(new IteratorWriter.ItemWriter() {
+        int i = -1;
+
+        @Override
+        public IteratorWriter.ItemWriter add(Object o) {
+          ++i;
+          if (i > idx) return this;
+          if (i == idx) result[0] = o;
+          return this;
+        }
+      });
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+    return result[0];
+
+  }
+
   private static boolean isMapLike(Object o) {
     return o instanceof Map || o instanceof NamedList || o instanceof MapWriter;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/solrj/src/test-files/solrj/solr/autoscaling/testSuggestionsRebalance2.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testSuggestionsRebalance2.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testSuggestionsRebalance2.json
new file mode 100644
index 0000000..958efc0
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testSuggestionsRebalance2.json
@@ -0,0 +1,130 @@
+{
+  "diagnostics":{
+    "sortedNodes":[
+     {
+      "node":"10.0.0.79:7574_solr",
+      "isLive":true,
+      "cores":4.0,
+      "freedisk":140.8341178894043,
+      "totaldisk":233.5667953491211,
+      "replicas":{
+        "gettingstarted":{
+          "shard2":[{
+            "core_node7":{
+              "core":"gettingstarted_shard2_replica_n4",
+              "shard":"shard2",
+              "collection":"gettingstarted",
+              "node_name":"10.0.0.79:7574_solr",
+              "type":"NRT",
+              "leader":"true",
+              "base_url":"http://10.0.0.79:7574/solr",
+              "state":"active",
+              "force_set_state":"false",
+              "INDEX.sizeInGB":6.426125764846802E-8}}],
+          "shard1":[{
+            "core_node3":{
+              "core":"gettingstarted_shard1_replica_n1",
+              "shard":"shard1",
+              "collection":"gettingstarted",
+              "node_name":"10.0.0.79:7574_solr",
+              "type":"NRT",
+              "leader":"true",
+              "base_url":"http://10.0.0.79:7574/solr",
+              "state":"active",
+              "force_set_state":"false",
+              "INDEX.sizeInGB":6.426125764846802E-8}}]},
+        "go":{
+          "shard2":[{
+            "core_node7":{
+              "core":"go_shard2_replica_n4",
+              "shard":"shard2",
+              "collection":"go",
+              "node_name":"10.0.0.79:7574_solr",
+              "type":"NRT",
+              "leader":"true",
+              "base_url":"http://10.0.0.79:7574/solr",
+              "state":"active",
+              "force_set_state":"false",
+              "INDEX.sizeInGB":6.426125764846802E-8}}],
+          "shard1":[{
+            "core_node3":{
+              "core":"go_shard1_replica_n1",
+              "shard":"shard1",
+              "collection":"go",
+              "node_name":"10.0.0.79:7574_solr",
+              "type":"NRT",
+              "leader":"true",
+              "base_url":"http://10.0.0.79:7574/solr",
+              "state":"active",
+              "force_set_state":"false",
+              "INDEX.sizeInGB":6.426125764846802E-8}}]}}}
+    ,{
+        "node":"10.0.0.79:8984_solr",
+        "isLive":true,
+        "cores":4.0,
+        "freedisk":140.8341178894043,
+        "totaldisk":233.5667953491211,
+        "replicas":{
+          "gettingstarted":{
+            "shard2":[{
+              "core_node8":{
+                "core":"gettingstarted_shard2_replica_n6",
+                "shard":"shard2",
+                "collection":"gettingstarted",
+                "node_name":"10.0.0.79:8984_solr",
+                "type":"NRT",
+                "base_url":"http://10.0.0.79:8984/solr",
+                "state":"active",
+                "force_set_state":"false",
+                "INDEX.sizeInGB":6.426125764846802E-8}}],
+            "shard1":[{
+              "core_node5":{
+                "core":"gettingstarted_shard1_replica_n2",
+                "shard":"shard1",
+                "collection":"gettingstarted",
+                "node_name":"10.0.0.79:8984_solr",
+                "type":"NRT",
+                "base_url":"http://10.0.0.79:8984/solr",
+                "state":"active",
+                "force_set_state":"false",
+                "INDEX.sizeInGB":6.426125764846802E-8}}]},
+          "go":{
+            "shard2":[{
+              "core_node8":{
+                "core":"go_shard2_replica_n6",
+                "shard":"shard2",
+                "collection":"go",
+                "node_name":"10.0.0.79:8984_solr",
+                "type":"NRT",
+                "base_url":"http://10.0.0.79:8984/solr",
+                "state":"active",
+                "force_set_state":"false",
+                "INDEX.sizeInGB":6.426125764846802E-8}}],
+            "shard1":[{
+              "core_node5":{
+                "core":"go_shard1_replica_n2",
+                "shard":"shard1",
+                "collection":"go",
+                "node_name":"10.0.0.79:8984_solr",
+                "type":"NRT",
+                "base_url":"http://10.0.0.79:8984/solr",
+                "state":"active",
+                "force_set_state":"false",
+                "INDEX.sizeInGB":6.426125764846802E-8}}]}}}
+    ,{
+        "node":"10.0.0.79:8983_solr",
+        "isLive":true,
+        "cores":0.0,
+        "freedisk":140.8341178894043,
+        "totaldisk":233.5667953491211,
+        "replicas":{}}],
+    "liveNodes":["10.0.0.79:7574_solr",
+      "10.0.0.79:8983_solr",
+      "10.0.0.79:8984_solr"],
+    "violations":[],
+    "config":{
+      "cluster-preferences":[{
+        "minimize":"cores",
+        "precision":1}
+      ,{
+          "maximize":"freedisk"}]}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/solrj/src/test-files/solrj/solr/autoscaling/testSuggestionsRebalanceOnly.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testSuggestionsRebalanceOnly.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testSuggestionsRebalanceOnly.json
new file mode 100644
index 0000000..ce0b682
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testSuggestionsRebalanceOnly.json
@@ -0,0 +1,105 @@
+{"diagnostics":{
+  "sortedNodes":[{
+    "node":"127.0.0.1:63191_solr",
+    "isLive":true,
+    "cores":3.0,
+    "sysprop.zone":"east",
+    "freedisk":1727.1459312438965,
+    "heapUsage":24.97510064011647,
+    "sysLoadAvg":272.75390625,
+    "totaldisk":1037.938980102539,
+    "replicas":{"zonesTest":{"shard1":[{"core_node5":{
+      "core":"zonesTest_shard1_replica_n2",
+      "leader":"true",
+      "base_url":"https://127.0.0.1:63191/solr",
+      "node_name":"127.0.0.1:63191_solr",
+      "state":"active",
+      "type":"NRT",
+      "force_set_state":"false",
+      "INDEX.sizeInGB":6.426125764846802E-8,
+      "shard":"shard1",
+      "collection":"zonesTest"}},
+      {"core_node7":{
+        "core":"zonesTest_shard1_replica_n4",
+        "base_url":"https://127.0.0.1:63191/solr",
+        "node_name":"127.0.0.1:63191_solr",
+        "state":"active",
+        "type":"NRT",
+        "force_set_state":"false",
+        "INDEX.sizeInGB":6.426125764846802E-8,
+        "shard":"shard1",
+        "collection":"zonesTest"}},
+      {"core_node12":{
+        "core":"zonesTest_shard1_replica_n10",
+        "base_url":"https://127.0.0.1:63191/solr",
+        "node_name":"127.0.0.1:63191_solr",
+        "state":"active",
+        "type":"NRT",
+        "force_set_state":"false",
+        "INDEX.sizeInGB":6.426125764846802E-8,
+        "shard":"shard1",
+        "collection":"zonesTest"}}]}}},
+    {
+      "node":"127.0.0.1:63192_solr",
+      "isLive":true,
+      "cores":3.0,
+      "sysprop.zone":"east",
+      "freedisk":1727.1459312438965,
+      "heapUsage":24.98878807983566,
+      "sysLoadAvg":272.75390625,
+      "totaldisk":1037.938980102539,
+      "replicas":{"zonesTest":{"shard2":[{"core_node3":{
+        "core":"zonesTest_shard1_replica_n1",
+        "base_url":"https://127.0.0.1:63192/solr",
+        "node_name":"127.0.0.1:63192_solr",
+        "state":"active",
+        "type":"NRT",
+        "force_set_state":"false",
+        "INDEX.sizeInGB":6.426125764846802E-8,
+        "shard":"shard2",
+        "collection":"zonesTest"}},
+        {"core_node9":{
+          "core":"zonesTest_shard1_replica_n6",
+          "base_url":"https://127.0.0.1:63192/solr",
+          "node_name":"127.0.0.1:63192_solr",
+          "state":"active",
+          "type":"NRT",
+          "force_set_state":"false",
+          "INDEX.sizeInGB":6.426125764846802E-8,
+          "shard":"shard2",
+          "collection":"zonesTest"}},
+        {"core_node11":{
+          "core":"zonesTest_shard1_replica_n8",
+          "base_url":"https://127.0.0.1:63192/solr",
+          "node_name":"127.0.0.1:63192_solr",
+          "state":"active",
+          "type":"NRT",
+          "force_set_state":"false",
+          "INDEX.sizeInGB":6.426125764846802E-8,
+          "shard":"shard2",
+          "collection":"zonesTest"}}]}}},
+    {
+      "node":"127.0.0.1:63219_solr",
+      "isLive":true,
+      "cores":0.0,
+      "sysprop.zone":"west",
+      "freedisk":1768.6174201965332,
+      "heapUsage":24.98878807983566,
+      "sysLoadAvg":272.75390625,
+      "totaldisk":1037.938980102539,
+      "replicas":{}},
+    {
+      "node":"127.0.0.1:63229_solr",
+      "isLive":true,
+      "cores":0.0,
+      "sysprop.zone":"west",
+      "freedisk":1768.6174201965332,
+      "heapUsage":24.98878807983566,
+      "sysLoadAvg":272.75390625,
+      "totaldisk":1037.938980102539,
+      "replicas":{}}],
+  "liveNodes":["127.0.0.1:63191_solr",
+    "127.0.0.1:63192_solr",
+    "127.0.0.1:63219_solr",
+    "127.0.0.1:63229_solr"]
+}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
index 4a16259..2b11d0c 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
@@ -79,6 +79,7 @@ import static org.apache.solr.common.params.CollectionParams.CollectionAction.MO
 
 public class TestPolicy extends SolrTestCaseJ4 {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
   static Suggester createSuggester(SolrCloudManager cloudManager, Map jsonObj, Suggester seed) throws IOException, InterruptedException {
     Policy.Session session = null;
     if (seed != null) session = seed.session;
@@ -397,19 +398,17 @@ public class TestPolicy extends SolrTestCaseJ4 {
     // collect the set of nodes to which replicas are being added
     Set<String> nodes = new HashSet<>(2);
 
-    m = l.get(0).toMap(new LinkedHashMap<>());
-    assertEquals(1.0d, Utils.getObjectByPath(m, true, "violation/violation/delta"));
-    assertEquals("POST", Utils.getObjectByPath(m, true, "operation/method"));
-    assertEquals("/c/articles_coll/shards", Utils.getObjectByPath(m, true, "operation/path"));
-    assertNotNull(Utils.getObjectByPath(m, false, "operation/command/add-replica"));
-    nodes.add((String) Utils.getObjectByPath(m, true, "operation/command/add-replica/node"));
+    assertEquals(1.0d, l.get(0)._get("violation/violation/delta", null));
+    assertEquals("POST", l.get(0)._get("operation/method", null));
+    assertEquals("/c/articles_coll/shards", l.get(0)._get("operation/path", null));
+    assertNotNull(l.get(0)._get("operation/command/add-replica", null));
+    nodes.add((String) l.get(0)._get("operation/command/add-replica/node", null));
 
-    m = l.get(1).toMap(new LinkedHashMap<>());
-    assertEquals(1.0d, Utils.getObjectByPath(m, true, "violation/violation/delta"));
-    assertEquals("POST", Utils.getObjectByPath(m, true, "operation/method"));
-    assertEquals("/c/articles_coll/shards", Utils.getObjectByPath(m, true, "operation/path"));
-    assertNotNull(Utils.getObjectByPath(m, false, "operation/command/add-replica"));
-    nodes.add((String) Utils.getObjectByPath(m, true, "operation/command/add-replica/node"));
+    assertEquals(1.0d, l.get(1)._get("violation/violation/delta", null));
+    assertEquals("POST", l.get(1)._get("operation/method", null));
+    assertEquals("/c/articles_coll/shards", l.get(1)._get("operation/path", null));
+    assertNotNull(l.get(1)._get("operation/command/add-replica", null));
+    nodes.add((String) l.get(1)._get("operation/command/add-replica/node", null));
 
     assertEquals(2, nodes.size());
     assertTrue(nodes.contains("node1"));
@@ -565,18 +564,17 @@ public class TestPolicy extends SolrTestCaseJ4 {
     Set<String> targetNodes = new HashSet<>();
     Set<String> movedReplicas = new HashSet<>();
     for (Suggester.SuggestionInfo suggestionInfo : l) {
-      Map s = suggestionInfo.toMap(new LinkedHashMap<>());
-      assertEquals("POST", Utils.getObjectByPath(s, true, "operation/method"));
-      if (Utils.getObjectByPath(s, false, "operation/command/add-replica") != null) {
+      assertEquals("POST", suggestionInfo._get("operation/method", null));
+      if (suggestionInfo._get("operation/command/add-replica", null) != null) {
         numAdds++;
-        assertEquals(1.0d, Utils.getObjectByPath(s, true, "violation/violation/delta"));
-        assertEquals("/c/articles_coll/shards", Utils.getObjectByPath(s, true, "operation/path"));
-        addNodes.add((String) Utils.getObjectByPath(s, true, "operation/command/add-replica/node"));
-      } else if (Utils.getObjectByPath(s, false, "operation/command/move-replica") != null) {
+        assertEquals(1.0d, suggestionInfo._get("violation/violation/delta", null));
+        assertEquals("/c/articles_coll/shards", suggestionInfo._get("operation/path", null));
+        addNodes.add((String) suggestionInfo._get("operation/command/add-replica/node", null));
+      } else if (suggestionInfo._get("operation/command/move-replica", null) != null) {
         numMoves++;
-        assertEquals("/c/articles_coll", Utils.getObjectByPath(s, true, "operation/path"));
-        targetNodes.add((String) Utils.getObjectByPath(s, true, "operation/command/move-replica/targetNode"));
-        movedReplicas.add((String) Utils.getObjectByPath(s, true, "operation/command/move-replica/replica"));
+        assertEquals("/c/articles_coll", suggestionInfo._get("operation/path", null));
+        targetNodes.add((String) suggestionInfo._get("operation/command/move-replica/targetNode", null));
+        movedReplicas.add((String) suggestionInfo._get("operation/command/move-replica/replica", null));
       } else {
         fail("Unexpected operation type suggested for suggestion: " + suggestionInfo);
       }
@@ -2357,13 +2355,12 @@ public class TestPolicy extends SolrTestCaseJ4 {
         cloudManagerWithData(dataproviderdata));
     assertFalse(l.isEmpty());
 
-    Map m = l.get(0).toMap(new LinkedHashMap<>());
-    assertEquals(1.0d, Utils.getObjectByPath(m, true, "violation/violation/delta"));
-    assertEquals("POST", Utils.getObjectByPath(m, true, "operation/method"));
-    assertEquals("/c/mycoll1", Utils.getObjectByPath(m, true, "operation/path"));
-    assertNotNull(Utils.getObjectByPath(m, false, "operation/command/move-replica"));
-    assertEquals("10.0.0.6:7574_solr", Utils.getObjectByPath(m, true, "operation/command/move-replica/targetNode"));
-    assertEquals("core_node2", Utils.getObjectByPath(m, true, "operation/command/move-replica/replica"));
+    assertEquals(1.0d, l.get(0)._get( "violation/violation/delta",null));
+    assertEquals("POST", l.get(0)._get("operation/method",null));
+    assertEquals("/c/mycoll1", l.get(0)._get( "operation/path",null));
+    assertNotNull(l.get(0)._get("operation/command/move-replica", null));
+    assertEquals("10.0.0.6:7574_solr", l.get(0)._get( "operation/command/move-replica/targetNode",null));
+    assertEquals("core_node2", l.get(0)._get("operation/command/move-replica/replica", null));
   }
 
 
@@ -2526,7 +2523,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
 
-  public void testFreeDiskSuggestions() throws IOException {
+  public void testFreeDiskSuggestions() {
     String dataproviderdata = "{" +
         "  liveNodes:[node1,node2]," +
         "  replicaInfo : {" +
@@ -2557,17 +2554,14 @@ public class TestPolicy extends SolrTestCaseJ4 {
 
     List<Suggester.SuggestionInfo> l = PolicyHelper.getSuggestions(cfg, cloudManagerWithData(dataproviderdata));
     assertEquals(3, l.size());
-    Map m = l.get(0).toMap(new LinkedHashMap<>());
-    assertEquals("r4", Utils.getObjectByPath(m, true, "operation/command/move-replica/replica"));
-    assertEquals("node1", Utils.getObjectByPath(m, true, "operation/command/move-replica/targetNode"));
+    assertEquals("r4", l.get(0)._get("operation/command/move-replica/replica", null));
+    assertEquals("node1", l.get(0)._get("operation/command/move-replica/targetNode", null));
 
-    m = l.get(1).toMap(new LinkedHashMap<>());
-    assertEquals("r3", Utils.getObjectByPath(m, true, "operation/command/move-replica/replica"));
-    assertEquals("node1", Utils.getObjectByPath(m, true, "operation/command/move-replica/targetNode"));
+    assertEquals("r3", l.get(1)._get("operation/command/move-replica/replica", null));
+    assertEquals("node1", l.get(1)._get("operation/command/move-replica/targetNode", null));
 
-    m = l.get(2).toMap(new LinkedHashMap<>());
-    assertEquals("r2", Utils.getObjectByPath(m, true, "operation/command/move-replica/replica"));
-    assertEquals("node1", Utils.getObjectByPath(m, true, "operation/command/move-replica/targetNode"));
+    assertEquals("r2", l.get(2)._get("operation/command/move-replica/replica", null));
+    assertEquals("node1", l.get(2)._get("operation/command/move-replica/targetNode", null));
 
 
     autoScalingjson = "  { cluster-policy:[" +
@@ -2582,18 +2576,17 @@ public class TestPolicy extends SolrTestCaseJ4 {
     assertEquals(0, violations.get(0).getViolatingReplicas().size());
 
     l = PolicyHelper.getSuggestions(cfg, cloudManagerWithData(dataproviderdata));
-    assertEquals(3, l.size());
-    m = l.get(0).toMap(new LinkedHashMap<>());
-    assertEquals("r4", Utils.getObjectByPath(m, true, "operation/command/move-replica/replica"));
-    assertEquals("node1", Utils.getObjectByPath(m, true, "operation/command/move-replica/targetNode"));
+    assertEquals(4, l.size());
+    assertEquals("r4", l.get(0)._get("operation/command/move-replica/replica", null));
+    assertEquals("node1", l.get(0)._get("operation/command/move-replica/targetNode", null));
+
+    assertEquals("r3", l.get(1)._get("operation/command/move-replica/replica", null));
+    assertEquals("node1", l.get(1)._get("operation/command/move-replica/targetNode", null));
 
-    m = l.get(1).toMap(new LinkedHashMap<>());
-    assertEquals("r3", Utils.getObjectByPath(m, true, "operation/command/move-replica/replica"));
-    assertEquals("node1", Utils.getObjectByPath(m, true, "operation/command/move-replica/targetNode"));
+    assertEquals("r2", l.get(2)._get("operation/command/move-replica/replica", null));
+    assertEquals("node1", l.get(2)._get("operation/command/move-replica/targetNode", null));
 
-    m = l.get(2).toMap(new LinkedHashMap<>());
-    assertEquals("r2", Utils.getObjectByPath(m, true, "operation/command/move-replica/replica"));
-    assertEquals("node1", Utils.getObjectByPath(m, true, "operation/command/move-replica/targetNode"));
+    assertEquals("improvement", l.get(3)._get("type", null));
 
 
   }
@@ -2630,10 +2623,9 @@ public class TestPolicy extends SolrTestCaseJ4 {
         cloudManagerWithData(dataproviderdata));
     assertEquals(2, l.size());
     for (Suggester.SuggestionInfo suggestionInfo : l) {
-      Map m = suggestionInfo.toMap(new LinkedHashMap<>());
-      assertEquals("10.0.0.6:7574_solr", Utils.getObjectByPath(m, true, "operation/command/move-replica/targetNode"));
-      assertEquals("POST", Utils.getObjectByPath(m, true, "operation/method"));
-      assertEquals("/c/mycoll1", Utils.getObjectByPath(m, true, "operation/path"));
+      assertEquals("10.0.0.6:7574_solr", suggestionInfo._get("operation/command/move-replica/targetNode", null));
+      assertEquals("POST", suggestionInfo._get("operation/method", null));
+      assertEquals("/c/mycoll1", suggestionInfo._get("operation/path", null));
     }
 
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3f2975c2/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
index 71c0287..e902ff9 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
@@ -377,6 +377,45 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
     }
   }
 
+  public void testSuggestionsRebalanceOnly() throws IOException {
+    String conf = " {" +
+        "    'cluster-preferences':[{" +
+        "      'minimize':'cores'," +
+        "      'precision':1}," +
+        "      {'maximize':'freedisk','precision':100}," +
+        "      {'minimize':'sysLoadAvg','precision':10}]," +
+        "    'cluster-policy':[" +
+        "{'replica':'<5','shard':'#EACH','sysprop.zone':['east','west']}]}";
+    Map<String, Object> m = (Map<String, Object>) loadFromResource("testSuggestionsRebalanceOnly.json");
+    SolrCloudManager cloudManagerFromDiagnostics = createCloudManagerFromDiagnostics(m);
+    AutoScalingConfig autoScalingConfig = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(conf));
+    List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(autoScalingConfig, cloudManagerFromDiagnostics);
+
+    assertEquals(2, suggestions.size());
+    assertEquals("improvement", suggestions.get(0)._get("type",null));
+    assertEquals("127.0.0.1:63229_solr", suggestions.get(0)._get("operation/command/move-replica/targetNode", null));
+    assertEquals("improvement", suggestions.get(1)._get( "type",null));
+    assertEquals("127.0.0.1:63219_solr", suggestions.get(1)._get("operation/command/move-replica/targetNode", null));
+  }
+
+  public void testSuggestionsRebalance2() throws IOException {
+    Map<String, Object> m = (Map<String, Object>) loadFromResource("testSuggestionsRebalance2.json");
+    SolrCloudManager cloudManagerFromDiagnostics = createCloudManagerFromDiagnostics(m);
+
+    AutoScalingConfig autoScalingConfig = new AutoScalingConfig((Map<String, Object>) Utils.getObjectByPath(m, false, "diagnostics/config"));
+    List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(autoScalingConfig, cloudManagerFromDiagnostics);
+
+    assertEquals(3, suggestions.size());
+
+    for (Suggester.SuggestionInfo suggestion : suggestions) {
+      assertEquals("improvement", suggestion._get("type", null));
+      assertEquals("10.0.0.79:8983_solr", suggestion._get("operation/command/move-replica/targetNode",null));
+    }
+
+
+
+  }
+
   public static Object loadFromResource(String file) throws IOException {
     try (InputStream is = TestPolicy2.class.getResourceAsStream("/solrj/solr/autoscaling/" + file)) {
       return Utils.fromJSON(is);