Posted to commits@lucene.apache.org by da...@apache.org on 2018/01/23 10:30:31 UTC

[01/41] lucene-solr:jira/solr-11702: Update doap files with 7.2.1 release

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/solr-11702 e8c17c140 -> 6f580a454


Update doap files with 7.2.1 release


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/518a3ec1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/518a3ec1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/518a3ec1

Branch: refs/heads/jira/solr-11702
Commit: 518a3ec14b86baa41784369695ccfb5203de17ce
Parents: 6dcbb2d
Author: Jim Ferenczi <ji...@apache.org>
Authored: Mon Jan 15 09:59:11 2018 +0100
Committer: Jim Ferenczi <ji...@apache.org>
Committed: Mon Jan 15 09:59:11 2018 +0100

----------------------------------------------------------------------
 dev-tools/doap/lucene.rdf | 7 +++++++
 dev-tools/doap/solr.rdf   | 7 +++++++
 2 files changed, 14 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/518a3ec1/dev-tools/doap/lucene.rdf
----------------------------------------------------------------------
diff --git a/dev-tools/doap/lucene.rdf b/dev-tools/doap/lucene.rdf
index ade660d..12caed3 100644
--- a/dev-tools/doap/lucene.rdf
+++ b/dev-tools/doap/lucene.rdf
@@ -69,6 +69,13 @@
     <!-- NOTE: please insert releases in numeric order, NOT chronologically. -->
     <release>
       <Version>
+        <name>lucene-7.2.1</name>
+        <created>2018-01-15</created>
+        <revision>7.2.1</revision>
+      </Version>
+    </release>
+    <release>
+      <Version>
         <name>lucene-7.2.0</name>
         <created>2017-12-21</created>
         <revision>7.2.0</revision>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/518a3ec1/dev-tools/doap/solr.rdf
----------------------------------------------------------------------
diff --git a/dev-tools/doap/solr.rdf b/dev-tools/doap/solr.rdf
index fd2f96a..105e711 100644
--- a/dev-tools/doap/solr.rdf
+++ b/dev-tools/doap/solr.rdf
@@ -69,6 +69,13 @@
     <!-- NOTE: please insert releases in numeric order, NOT chronologically. -->
     <release>
       <Version>
+        <name>solr-7.2.1</name>
+        <created>2018-01-15</created>
+        <revision>7.2.1</revision>
+      </Version>
+    </release>
+    <release>
+      <Version>
         <name>solr-7.2.0</name>
         <created>2017-12-21</created>
         <revision>7.2.0</revision>
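For orientation, both hunks splice a 7.2.1 entry in front of the existing 7.2.0 one. Reconstructed from the solr.rdf hunk above (lucene.rdf is identical apart from the name), the complete new entry reads:

    <release>
      <Version>
        <name>solr-7.2.1</name>
        <created>2018-01-15</created>
        <revision>7.2.1</revision>
      </Version>
    </release>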


[31/41] lucene-solr:jira/solr-11702: SOLR-11867: Add indexOf, rowCount and columnCount StreamEvaluators

Posted by da...@apache.org.
SOLR-11867: Add indexOf, rowCount and columnCount StreamEvaluators


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f491fad9
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f491fad9
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f491fad9

Branch: refs/heads/jira/solr-11702
Commit: f491fad955fc7442be99f2c44724a9c631fd638b
Parents: 42832f8
Author: Joel Bernstein <jb...@apache.org>
Authored: Wed Jan 17 15:42:51 2018 -0500
Committer: Joel Bernstein <jb...@apache.org>
Committed: Wed Jan 17 15:43:05 2018 -0500

----------------------------------------------------------------------
 .../org/apache/solr/handler/StreamHandler.java  |  3 ++
 .../solrj/io/eval/ColumnCountEvaluator.java     | 42 ++++++++++++++++
 .../client/solrj/io/eval/IndexOfEvaluator.java  | 51 ++++++++++++++++++++
 .../solr/client/solrj/io/eval/Matrix.java       |  8 +++
 .../client/solrj/io/eval/RowCountEvaluator.java | 42 ++++++++++++++++
 .../solrj/io/stream/StreamExpressionTest.java   | 13 ++++-
 6 files changed, 157 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f491fad9/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
index aa602860..b9a271a 100644
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
@@ -307,6 +307,9 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
         .withFunctionName("setRowLabels", SetRowLabelsEvaluator.class)
         .withFunctionName("knn", KnnEvaluator.class)
         .withFunctionName("getAttributes", GetAttributesEvaluator.class)
+        .withFunctionName("indexOf", IndexOfEvaluator.class)
+        .withFunctionName("columnCount", ColumnCountEvaluator.class)
+        .withFunctionName("rowCount", RowCountEvaluator.class)
 
         // Boolean Stream Evaluators
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f491fad9/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ColumnCountEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ColumnCountEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ColumnCountEvaluator.java
new file mode 100644
index 0000000..2949e45
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ColumnCountEvaluator.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class ColumnCountEvaluator extends RecursiveObjectEvaluator implements OneValueWorker {
+  private static final long serialVersionUID = 1;
+
+  public ColumnCountEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
+    super(expression, factory);
+  }
+
+  @Override
+  public Object doWork(Object value) throws IOException {
+    if(!(value instanceof Matrix)){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - found type %s for value, expecting a Matrix",toExpression(constructingFactory), value.getClass().getSimpleName()));
+    } else {
+      Matrix matrix = (Matrix)value;
+      return matrix.getColumnCount();
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f491fad9/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/IndexOfEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/IndexOfEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/IndexOfEvaluator.java
new file mode 100644
index 0000000..60136ff
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/IndexOfEvaluator.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.List;
+
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class IndexOfEvaluator extends RecursiveObjectEvaluator implements TwoValueWorker {
+  private static final long serialVersionUID = 1;
+
+  public IndexOfEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
+    super(expression, factory);
+  }
+
+  @Override
+  public Object doWork(Object value1, Object value2) throws IOException {
+    if(!(value1 instanceof List)){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - found type %s for value, expecting an array",toExpression(constructingFactory), value1.getClass().getSimpleName()));
+    } else {
+      List list = (List)value1;
+      String find = value2.toString().replace("\"","");
+      for(int i=0; i<list.size(); i++) {
+        Object o = list.get(i);
+        if(o.toString().equals(find)) {
+          return i;
+        }
+      }
+
+      return -1;
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f491fad9/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/Matrix.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/Matrix.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/Matrix.java
index 7fcfca2..ed10a8c 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/Matrix.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/Matrix.java
@@ -67,6 +67,14 @@ public class Matrix implements Iterable, Attributes {
     return this.data;
   }
 
+  public int getRowCount() {
+    return data.length;
+  }
+
+  public int getColumnCount() {
+    return data[0].length;
+  }
+
   public Iterator iterator() {
     return new MatrixIterator(data);
   }
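A side note on the new accessors (an observation, not part of the patch): getColumnCount() reads data[0].length, so it presumes a non-empty, rectangular matrix; on a zero-row matrix it would throw ArrayIndexOutOfBoundsException. A defensive variant, if that case ever matters, might look like:

    public int getColumnCount() {
      // Guard against a zero-row matrix before dereferencing the first row.
      return data.length == 0 ? 0 : data[0].length;
    }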

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f491fad9/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/RowCountEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/RowCountEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/RowCountEvaluator.java
new file mode 100644
index 0000000..8e92d1a
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/RowCountEvaluator.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class RowCountEvaluator extends RecursiveObjectEvaluator implements OneValueWorker {
+  private static final long serialVersionUID = 1;
+
+  public RowCountEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
+    super(expression, factory);
+  }
+
+  @Override
+  public Object doWork(Object value) throws IOException {
+    if(!(value instanceof Matrix)){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - found type %s for value, expecting a Matrix",toExpression(constructingFactory), value.getClass().getSimpleName()));
+    } else {
+      Matrix matrix = (Matrix)value;
+      return matrix.getRowCount();
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f491fad9/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index 1493562..4937992 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -6178,11 +6178,15 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     String cexpr = "let(echo=true," +
         "               a=setColumnLabels(matrix(array(1, 2, 3), " +
         "                                        rev(array(4,5,6)))," +
-        "                                 array(col1, col2, col3))," +
+        "                                        array(col1, col2, col3))," +
         "               b=rowAt(a, 1)," +
         "               c=colAt(a, 2)," +
         "               d=getColumnLabels(a)," +
-        "               e=topFeatures(a, 1))";
+        "               e=topFeatures(a, 1)," +
+        "               f=rowCount(a)," +
+        "               g=columnCount(a)," +
+        "               h=indexOf(d, \"col2\")," +
+        "               i=indexOf(d, col3))";
     ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
     paramsLoc.set("expr", cexpr);
     paramsLoc.set("qt", "/stream");
@@ -6230,6 +6234,11 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     assertEquals(features.get(1).size(), 1);
     assertEquals(features.get(0).get(0), "col3");
     assertEquals(features.get(1).get(0), "col1");
+
+    assertTrue(tuples.get(0).getLong("f") == 2);
+    assertTrue(tuples.get(0).getLong("g") == 3);
+    assertTrue(tuples.get(0).getLong("h") == 1);
+    assertTrue(tuples.get(0).getLong("i") == 2);
   }
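Taken together, a client can exercise the three new evaluators with an expression like the one in the test: rowCount and columnCount report the matrix shape, while indexOf matches array elements by their string form (quotes on the second argument are stripped) and returns -1 on a miss. A minimal SolrJ sketch (the base URL and collection name are illustrative assumptions; the expression mirrors the test above):

    import java.io.IOException;

    import org.apache.solr.client.solrj.io.Tuple;
    import org.apache.solr.client.solrj.io.stream.SolrStream;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class MatrixShapeExample {
      public static void main(String[] args) throws IOException {
        String expr = "let(echo=true," +
            "    a=matrix(array(1,2,3), array(4,5,6))," +
            "    b=rowCount(a)," +                           // 2
            "    c=columnCount(a)," +                        // 3
            "    d=indexOf(array(col1, col2, col3), col2))"; // 1 (zero-based)
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("expr", expr);
        params.set("qt", "/stream");
        SolrStream stream = new SolrStream("http://localhost:8983/solr/collection1", params);
        try {
          stream.open();
          // With echo=true, let() emits its variables as a single tuple.
          Tuple tuple = stream.read();
          System.out.println(tuple.getLong("b") + " x " + tuple.getLong("c")
              + ", indexOf=" + tuple.getLong("d"));
        } finally {
          stream.close();
        }
      }
    }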
 
 


[26/41] lucene-solr:jira/solr-11702: LUCENE-8129: allow passing filtered unicode sets to ICUFoldingFilter

Posted by da...@apache.org.
LUCENE-8129: allow passing filtered unicode sets to ICUFoldingFilter


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6781a0d2
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6781a0d2
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6781a0d2

Branch: refs/heads/jira/solr-11702
Commit: 6781a0d2d3113e4f423bf717e9c8f781374265ca
Parents: a6b5c5b
Author: Rob Muir <ro...@ntrepidcorp.com>
Authored: Tue Jan 16 12:41:31 2018 -0800
Committer: Rob Muir <ro...@ntrepidcorp.com>
Committed: Tue Jan 16 12:45:17 2018 -0800

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  3 +++
 .../lucene/analysis/icu/ICUFoldingFilter.java   | 28 +++++++++++++++-----
 .../analysis/icu/ICUFoldingFilterFactory.java   | 20 ++++++++++++--
 .../icu/TestICUFoldingFilterFactory.java        | 21 +++++++++++++--
 4 files changed, 62 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6781a0d2/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 435a461..038285e 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -125,6 +125,9 @@ Improvements
 
 * LUCENE-8125: ICUTokenizer support for emoji/emoji sequence tokens. (Robert Muir)
 
+* LUCENE-8129: A Unicode set filter can now be specified when using ICUFoldingFilter.
+  (Ere Maijala)
+
 Bug Fixes
 
 * LUCENE-8077: Fixed bug in how CheckIndex verifies doc-value iterators.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6781a0d2/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilter.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilter.java
index 0895b47..9c3770c 100644
--- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilter.java
+++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilter.java
@@ -59,18 +59,34 @@ import com.ibm.icu.text.Normalizer2;
  * All foldings, case folding, and normalization mappings are applied recursively
  * to ensure a fully folded and normalized result.
  * </p>
+ * <p>
+ * A normalizer with additional settings such as a filter that lists characters not
+ * to be normalized can be passed in the constructor.
+ * </p>
  */
 public final class ICUFoldingFilter extends ICUNormalizer2Filter {
-  // TODO: if the wrong version of the ICU jar is used, loading these data files may give a strange error.
-  // maybe add an explicit check? http://icu-project.org/apiref/icu4j/com/ibm/icu/util/VersionInfo.html
-  private static final Normalizer2 normalizer =  Normalizer2.getInstance(
-      ICUFoldingFilter.class.getResourceAsStream("utr30.nrm"), 
-      "utr30", Normalizer2.Mode.COMPOSE);
-  
+  /**
+   * A normalizer for search term folding to Unicode text,
+   * applying foldings from UTR#30 Character Foldings.
+   */
+  public static final Normalizer2 NORMALIZER = Normalizer2.getInstance(
+    // TODO: if the wrong version of the ICU jar is used, loading these data files may give a strange error.
+    // maybe add an explicit check? http://icu-project.org/apiref/icu4j/com/ibm/icu/util/VersionInfo.html
+    ICUFoldingFilter.class.getResourceAsStream("utr30.nrm"),
+    "utr30", Normalizer2.Mode.COMPOSE);
+
   /**
    * Create a new ICUFoldingFilter on the specified input
    */
   public ICUFoldingFilter(TokenStream input) {
+    super(input, NORMALIZER);
+  }
+
+  /**
+   * Create a new ICUFoldingFilter on the specified input with the specified
+   * normalizer
+   */
+  public ICUFoldingFilter(TokenStream input, Normalizer2 normalizer) {
     super(input, normalizer);
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6781a0d2/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java
index 036874a..1065cbf 100644
--- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java
+++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java
@@ -25,7 +25,11 @@ import org.apache.lucene.analysis.util.AbstractAnalysisFactory; // javadocs
 import org.apache.lucene.analysis.util.MultiTermAwareComponent;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
-/** 
+import com.ibm.icu.text.FilteredNormalizer2;
+import com.ibm.icu.text.Normalizer2;
+import com.ibm.icu.text.UnicodeSet;
+
+/**
  * Factory for {@link ICUFoldingFilter}.
  * <pre class="prettyprint">
  * &lt;fieldType name="text_folded" class="solr.TextField" positionIncrementGap="100"&gt;
@@ -37,18 +41,30 @@ import org.apache.lucene.analysis.util.TokenFilterFactory;
  * @since 3.1.0
  */
 public class ICUFoldingFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent {
+  private final Normalizer2 normalizer;
 
   /** Creates a new ICUFoldingFilterFactory */
   public ICUFoldingFilterFactory(Map<String,String> args) {
     super(args);
+
+    Normalizer2 normalizer = ICUFoldingFilter.NORMALIZER;
+    String filter = get(args, "filter");
+    if (filter != null) {
+      UnicodeSet set = new UnicodeSet(filter);
+      if (!set.isEmpty()) {
+        set.freeze();
+        normalizer = new FilteredNormalizer2(normalizer, set);
+      }
+    }
     if (!args.isEmpty()) {
       throw new IllegalArgumentException("Unknown parameters: " + args);
     }
+    this.normalizer = normalizer;
   }
 
   @Override
   public TokenStream create(TokenStream input) {
-    return new ICUFoldingFilter(input);
+    return new ICUFoldingFilter(input, normalizer);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6781a0d2/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilterFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilterFactory.java b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilterFactory.java
index 3782216..3e3c523 100644
--- a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilterFactory.java
+++ b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilterFactory.java
@@ -26,7 +26,7 @@ import org.apache.lucene.analysis.TokenStream;
 
 /** basic tests for {@link ICUFoldingFilterFactory} */
 public class TestICUFoldingFilterFactory extends BaseTokenStreamTestCase {
-  
+
   /** basic tests to ensure the folding is working */
   public void test() throws Exception {
     Reader reader = new StringReader("Résumé");
@@ -35,7 +35,24 @@ public class TestICUFoldingFilterFactory extends BaseTokenStreamTestCase {
     stream = factory.create(stream);
     assertTokenStreamContents(stream, new String[] { "resume" });
   }
-  
+
+  /** test to ensure the filter parameter is working */
+  public void testFilter() throws Exception {
+    HashMap<String,String> args = new HashMap<String,String>();
+    args.put("filter", "[^ö]");
+    ICUFoldingFilterFactory factory = new ICUFoldingFilterFactory(args);
+
+    Reader reader = new StringReader("Résumé");
+    TokenStream stream = whitespaceMockTokenizer(reader);
+    stream = factory.create(stream);
+    assertTokenStreamContents(stream, new String[] { "resume" });
+
+    reader = new StringReader("Fönster");
+    stream = whitespaceMockTokenizer(reader);
+    stream = factory.create(stream);
+    assertTokenStreamContents(stream, new String[] { "fönster" });
+  }
+
   /** Test that bogus arguments result in exception */
   public void testBogusArguments() throws Exception {
     IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
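Outside the factory, the same filtering can be done directly through the pieces this commit makes public: the NORMALIZER constant and the new two-argument constructor. A minimal sketch (the set [^ö] and the helper name are illustrative, mirroring the test above):

    import com.ibm.icu.text.FilteredNormalizer2;
    import com.ibm.icu.text.Normalizer2;
    import com.ibm.icu.text.UnicodeSet;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.icu.ICUFoldingFilter;

    public final class FoldingExamples {
      // Only characters inside the set are folded; 'ö' passes through as-is,
      // so "Fönster" becomes "fönster" rather than "fonster".
      public static TokenStream foldExceptOUmlaut(TokenStream input) {
        UnicodeSet set = new UnicodeSet("[^ö]");
        set.freeze(); // frozen sets are immutable and safe to share
        Normalizer2 filtered = new FilteredNormalizer2(ICUFoldingFilter.NORMALIZER, set);
        return new ICUFoldingFilter(input, filtered);
      }
    }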


[25/41] lucene-solr:jira/solr-11702: SOLR-11810: Upgrade Jetty to 9.4.8

Posted by da...@apache.org.
SOLR-11810: Upgrade Jetty to 9.4.8


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a6b5c5bf
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a6b5c5bf
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a6b5c5bf

Branch: refs/heads/jira/solr-11702
Commit: a6b5c5bfb0dec510ebd53555dbefd3ea703cdbe6
Parents: a3c4f73
Author: Erick Erickson <er...@apache.org>
Authored: Tue Jan 16 12:14:03 2018 -0800
Committer: Erick Erickson <er...@apache.org>
Committed: Tue Jan 16 12:14:03 2018 -0800

----------------------------------------------------------------------
 lucene/ivy-versions.properties                           |  2 +-
 .../jetty-continuation-9.3.20.v20170531.jar.sha1         |  1 -
 .../licenses/jetty-continuation-9.4.8.v20171121.jar.sha1 |  1 +
 lucene/licenses/jetty-http-9.3.20.v20170531.jar.sha1     |  1 -
 lucene/licenses/jetty-http-9.4.8.v20171121.jar.sha1      |  1 +
 lucene/licenses/jetty-io-9.3.20.v20170531.jar.sha1       |  1 -
 lucene/licenses/jetty-io-9.4.8.v20171121.jar.sha1        |  1 +
 lucene/licenses/jetty-server-9.3.20.v20170531.jar.sha1   |  1 -
 lucene/licenses/jetty-server-9.4.8.v20171121.jar.sha1    |  1 +
 lucene/licenses/jetty-servlet-9.3.20.v20170531.jar.sha1  |  1 -
 lucene/licenses/jetty-servlet-9.4.8.v20171121.jar.sha1   |  1 +
 lucene/licenses/jetty-util-9.3.20.v20170531.jar.sha1     |  1 -
 lucene/licenses/jetty-util-9.4.8.v20171121.jar.sha1      |  1 +
 .../org/apache/lucene/replicator/ReplicatorTestCase.java |  4 ++--
 solr/CHANGES.txt                                         |  6 ++++--
 .../solr/client/solrj/embedded/JettySolrRunner.java      |  8 ++++----
 .../jetty-continuation-9.3.20.v20170531.jar.sha1         |  1 -
 .../licenses/jetty-continuation-9.4.8.v20171121.jar.sha1 |  1 +
 solr/licenses/jetty-deploy-9.3.20.v20170531.jar.sha1     |  1 -
 solr/licenses/jetty-deploy-9.4.8.v20171121.jar.sha1      |  1 +
 solr/licenses/jetty-http-9.3.20.v20170531.jar.sha1       |  1 -
 solr/licenses/jetty-http-9.4.8.v20171121.jar.sha1        |  1 +
 solr/licenses/jetty-io-9.3.20.v20170531.jar.sha1         |  1 -
 solr/licenses/jetty-io-9.4.8.v20171121.jar.sha1          |  1 +
 solr/licenses/jetty-jmx-9.3.20.v20170531.jar.sha1        |  1 -
 solr/licenses/jetty-jmx-9.4.8.v20171121.jar.sha1         |  1 +
 solr/licenses/jetty-rewrite-9.3.20.v20170531.jar.sha1    |  1 -
 solr/licenses/jetty-rewrite-9.4.8.v20171121.jar.sha1     |  1 +
 solr/licenses/jetty-security-9.3.20.v20170531.jar.sha1   |  1 -
 solr/licenses/jetty-security-9.4.8.v20171121.jar.sha1    |  1 +
 solr/licenses/jetty-server-9.3.20.v20170531.jar.sha1     |  1 -
 solr/licenses/jetty-server-9.4.8.v20171121.jar.sha1      |  1 +
 solr/licenses/jetty-servlet-9.3.20.v20170531.jar.sha1    |  1 -
 solr/licenses/jetty-servlet-9.4.8.v20171121.jar.sha1     |  1 +
 solr/licenses/jetty-servlets-9.3.20.v20170531.jar.sha1   |  1 -
 solr/licenses/jetty-servlets-9.4.8.v20171121.jar.sha1    |  1 +
 solr/licenses/jetty-util-9.3.20.v20170531.jar.sha1       |  1 -
 solr/licenses/jetty-util-9.4.8.v20171121.jar.sha1        |  1 +
 solr/licenses/jetty-webapp-9.3.20.v20170531.jar.sha1     |  1 -
 solr/licenses/jetty-webapp-9.4.8.v20171121.jar.sha1      |  1 +
 solr/licenses/jetty-xml-9.3.20.v20170531.jar.sha1        |  1 -
 solr/licenses/jetty-xml-9.4.8.v20171121.jar.sha1         |  1 +
 solr/licenses/start.jar.sha1                             |  2 +-
 .../solr/client/solrj/embedded/JettyWebappTest.java      |  4 ++--
 .../src/java/org/apache/solr/util/SSLTestConfig.java     | 11 +++++------
 45 files changed, 38 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/ivy-versions.properties
----------------------------------------------------------------------
diff --git a/lucene/ivy-versions.properties b/lucene/ivy-versions.properties
index 4021d3e..bd2e052 100644
--- a/lucene/ivy-versions.properties
+++ b/lucene/ivy-versions.properties
@@ -230,7 +230,7 @@ org.codehaus.janino.version = 2.7.6
 /org.codehaus.woodstox/stax2-api = 3.1.4
 /org.codehaus.woodstox/woodstox-core-asl = 4.4.1
 
-org.eclipse.jetty.version = 9.3.20.v20170531
+org.eclipse.jetty.version = 9.4.8.v20171121
 /org.eclipse.jetty/jetty-continuation = ${org.eclipse.jetty.version}
 /org.eclipse.jetty/jetty-deploy = ${org.eclipse.jetty.version}
 /org.eclipse.jetty/jetty-http = ${org.eclipse.jetty.version}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/licenses/jetty-continuation-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/lucene/licenses/jetty-continuation-9.3.20.v20170531.jar.sha1 b/lucene/licenses/jetty-continuation-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 4e086fc..0000000
--- a/lucene/licenses/jetty-continuation-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0176f1ef8366257e7b6214c3bbd710cf47593135

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/licenses/jetty-continuation-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/lucene/licenses/jetty-continuation-9.4.8.v20171121.jar.sha1 b/lucene/licenses/jetty-continuation-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..f519f05
--- /dev/null
+++ b/lucene/licenses/jetty-continuation-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+34b64138f6589d3d32d02058fe73ec788cb981bf

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/licenses/jetty-http-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/lucene/licenses/jetty-http-9.3.20.v20170531.jar.sha1 b/lucene/licenses/jetty-http-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 1936a2e..0000000
--- a/lucene/licenses/jetty-http-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-32f5fe22ed468a49df1ffcbb27c39c1b53f261aa

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/licenses/jetty-http-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/lucene/licenses/jetty-http-9.4.8.v20171121.jar.sha1 b/lucene/licenses/jetty-http-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..1e97da0
--- /dev/null
+++ b/lucene/licenses/jetty-http-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+9879d6c4e37400bf43f0cd4b3c6e34a3ba409864

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/licenses/jetty-io-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/lucene/licenses/jetty-io-9.3.20.v20170531.jar.sha1 b/lucene/licenses/jetty-io-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 5d47c21..0000000
--- a/lucene/licenses/jetty-io-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5b68e7761fcacefcf26ad9ab50943db65fda2c3d

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/licenses/jetty-io-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/lucene/licenses/jetty-io-9.4.8.v20171121.jar.sha1 b/lucene/licenses/jetty-io-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..2396010
--- /dev/null
+++ b/lucene/licenses/jetty-io-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+d3fe2dfa62f52ee91ff07cb359f63387e0e30b40

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/licenses/jetty-server-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/lucene/licenses/jetty-server-9.3.20.v20170531.jar.sha1 b/lucene/licenses/jetty-server-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 0c9d435..0000000
--- a/lucene/licenses/jetty-server-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6a1523d44ebb527eed068a5c8bfd22edd6a20530

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/licenses/jetty-server-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/lucene/licenses/jetty-server-9.4.8.v20171121.jar.sha1 b/lucene/licenses/jetty-server-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..1369bae
--- /dev/null
+++ b/lucene/licenses/jetty-server-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+34614bd9a29de57ef28ca31f1f2b49a412af196d

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/licenses/jetty-servlet-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/lucene/licenses/jetty-servlet-9.3.20.v20170531.jar.sha1 b/lucene/licenses/jetty-servlet-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 452932d..0000000
--- a/lucene/licenses/jetty-servlet-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-21a698f9d58d03cdf58bf2a40f93de58c2eab138

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/licenses/jetty-servlet-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/lucene/licenses/jetty-servlet-9.4.8.v20171121.jar.sha1 b/lucene/licenses/jetty-servlet-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..5632347
--- /dev/null
+++ b/lucene/licenses/jetty-servlet-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+bbbb9b5de08f468c7b9b3de6aea0b098d2c679b6

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/licenses/jetty-util-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/lucene/licenses/jetty-util-9.3.20.v20170531.jar.sha1 b/lucene/licenses/jetty-util-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 7d020a4..0000000
--- a/lucene/licenses/jetty-util-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-19ce4203809da37f8ea7a5632704fa71b6f0ccc2

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/licenses/jetty-util-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/lucene/licenses/jetty-util-9.4.8.v20171121.jar.sha1 b/lucene/licenses/jetty-util-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..7a3c6ad
--- /dev/null
+++ b/lucene/licenses/jetty-util-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+d6ec1a1613c7fa72aa6bf5d8c204750afbc3df3b

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java b/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java
index 6d27071..98349c3 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java
@@ -28,7 +28,7 @@ import org.eclipse.jetty.server.SecureRequestCustomizer;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.server.SslConnectionFactory;
-import org.eclipse.jetty.server.session.HashSessionIdManager;
+import org.eclipse.jetty.server.session.DefaultSessionIdManager;
 import org.eclipse.jetty.util.ssl.SslContextFactory;
 import org.eclipse.jetty.util.thread.QueuedThreadPool;
 import org.junit.AfterClass;
@@ -109,7 +109,7 @@ public abstract class ReplicatorTestCase extends LuceneTestCase {
     connector.setHost("127.0.0.1");
 
     server.setConnectors(new Connector[] {connector});
-    server.setSessionIdManager(new HashSessionIdManager(new Random(random().nextLong())));
+    server.setSessionIdManager(new DefaultSessionIdManager(server, new Random(random().nextLong())));
     server.setHandler(handler);
     
     server.start();
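This file and the two Solr test classes further down make the same substitution: Jetty 9.4 drops HashSessionIdManager, and its replacement DefaultSessionIdManager must be handed the Server it belongs to. A condensed sketch of the migration (the helper name is illustrative; the constructor call is the one used in the patch):

    import java.util.Random;

    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.session.DefaultSessionIdManager;

    public final class JettySessionSetup {
      // Jetty 9.3: server.setSessionIdManager(new HashSessionIdManager(random));
      // Jetty 9.4: the id manager is constructed against the server itself.
      public static void installSeededSessionIds(Server server, long seed) {
        server.setSessionIdManager(new DefaultSessionIdManager(server, new Random(seed)));
      }
    }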

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 187976d..8376794 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -47,7 +47,7 @@ Carrot2 3.15.0
 Velocity 1.7 and Velocity Tools 2.0
 Apache UIMA 2.3.1
 Apache ZooKeeper 3.4.10
-Jetty 9.3.20.v20170531
+Jetty 9.4.8.v20171121
 
 Upgrade Notes
 ----------------------
@@ -89,9 +89,11 @@ New Features
 
 * SOLR-11064: Collection APIs should use the disk space hint when using policy framework  (noble)
 
-* SOLR-11854: multivalued primative fields can now be sorted by implicitly choosing the min/max
+* SOLR-11854: multivalued primitive fields can now be sorted by implicitly choosing the min/max
   value for asc/desc sort orders. (hossman)
 
+* SOLR-11810: Upgrade Jetty to 9.4.8.v20171121 (Varun Thacker, Erick Erickson)
+
 Bug Fixes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
index 608b234..e5b81f8 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
@@ -53,11 +53,11 @@ import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.server.SslConnectionFactory;
 import org.eclipse.jetty.server.handler.gzip.GzipHandler;
-import org.eclipse.jetty.server.session.HashSessionIdManager;
-import org.eclipse.jetty.servlet.BaseHolder;
+import org.eclipse.jetty.server.session.DefaultSessionIdManager;
 import org.eclipse.jetty.servlet.FilterHolder;
 import org.eclipse.jetty.servlet.ServletContextHandler;
 import org.eclipse.jetty.servlet.ServletHolder;
+import org.eclipse.jetty.servlet.Source;
 import org.eclipse.jetty.util.component.LifeCycle;
 import org.eclipse.jetty.util.ssl.SslContextFactory;
 import org.eclipse.jetty.util.thread.QueuedThreadPool;
@@ -248,7 +248,7 @@ public class JettySolrRunner {
       connector.setIdleTimeout(THREAD_POOL_MAX_IDLE_TIME_MS);
       
       server.setConnectors(new Connector[] {connector});
-      server.setSessionIdManager(new HashSessionIdManager(new Random()));
+      server.setSessionIdManager(new DefaultSessionIdManager(server, new Random()));
     } else {
       ServerConnector connector = new ServerConnector(server, new HttpConnectionFactory());
       connector.setPort(port);
@@ -300,7 +300,7 @@ public class JettySolrRunner {
           String pathSpec = config.extraServlets.get(servletHolder);
           root.addServlet(servletHolder, pathSpec);
         }
-        dispatchFilter = root.getServletHandler().newFilterHolder(BaseHolder.Source.EMBEDDED);
+        dispatchFilter = root.getServletHandler().newFilterHolder(Source.EMBEDDED);
         dispatchFilter.setHeldClass(SolrDispatchFilter.class);
         dispatchFilter.setInitParameter("excludePatterns", excludePatterns);
         root.addFilter(dispatchFilter, "*", EnumSet.of(DispatcherType.REQUEST));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-continuation-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-continuation-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-continuation-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 4e086fc..0000000
--- a/solr/licenses/jetty-continuation-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0176f1ef8366257e7b6214c3bbd710cf47593135

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-continuation-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-continuation-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-continuation-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..f519f05
--- /dev/null
+++ b/solr/licenses/jetty-continuation-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+34b64138f6589d3d32d02058fe73ec788cb981bf

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-deploy-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-deploy-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-deploy-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index f7f0478..0000000
--- a/solr/licenses/jetty-deploy-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-160c0cefd2fddacd040c41801f40a5a372a9302c

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-deploy-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-deploy-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-deploy-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..293df7b
--- /dev/null
+++ b/solr/licenses/jetty-deploy-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+5737f32c5f017905f92baea5479d6ee9c5405dc8

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-http-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-http-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-http-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 1936a2e..0000000
--- a/solr/licenses/jetty-http-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-32f5fe22ed468a49df1ffcbb27c39c1b53f261aa

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-http-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-http-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-http-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..1e97da0
--- /dev/null
+++ b/solr/licenses/jetty-http-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+9879d6c4e37400bf43f0cd4b3c6e34a3ba409864

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-io-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-io-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-io-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 5d47c21..0000000
--- a/solr/licenses/jetty-io-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5b68e7761fcacefcf26ad9ab50943db65fda2c3d

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-io-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-io-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-io-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..2396010
--- /dev/null
+++ b/solr/licenses/jetty-io-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+d3fe2dfa62f52ee91ff07cb359f63387e0e30b40

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-jmx-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-jmx-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-jmx-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 90b06d7..0000000
--- a/solr/licenses/jetty-jmx-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4a28dd045b8992752ff7727f25cf9e888e9c8c4c

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-jmx-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-jmx-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-jmx-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..7b4bb76
--- /dev/null
+++ b/solr/licenses/jetty-jmx-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+7ee4d6a96cced3c1758125e3a5ad26281c5c7123

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-rewrite-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-rewrite-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-rewrite-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index ddb45f1..0000000
--- a/solr/licenses/jetty-rewrite-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8fb029863ceb6531ee0e24c59a004f622226217b

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-rewrite-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-rewrite-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-rewrite-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..3bdc42d
--- /dev/null
+++ b/solr/licenses/jetty-rewrite-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+755ec66c0d7bb4fc7c4293dfca329c2cf4f044f3

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-security-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-security-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-security-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index d4f40c4..0000000
--- a/solr/licenses/jetty-security-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9e2ded957c05f447a0611fa64ca4ab5f7cc5aa65

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-security-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-security-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-security-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..c3f377d
--- /dev/null
+++ b/solr/licenses/jetty-security-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+e8350eec683b55494287f06740543e4be6f75425

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-server-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-server-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-server-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 0c9d435..0000000
--- a/solr/licenses/jetty-server-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6a1523d44ebb527eed068a5c8bfd22edd6a20530

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-server-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-server-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-server-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..1369bae
--- /dev/null
+++ b/solr/licenses/jetty-server-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+34614bd9a29de57ef28ca31f1f2b49a412af196d

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-servlet-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-servlet-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-servlet-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 452932d..0000000
--- a/solr/licenses/jetty-servlet-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-21a698f9d58d03cdf58bf2a40f93de58c2eab138

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-servlet-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-servlet-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-servlet-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..5632347
--- /dev/null
+++ b/solr/licenses/jetty-servlet-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+bbbb9b5de08f468c7b9b3de6aea0b098d2c679b6

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-servlets-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-servlets-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-servlets-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 5790855..0000000
--- a/solr/licenses/jetty-servlets-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0bb3b1ddc06525eba71c37f51402996502d323a9

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-servlets-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-servlets-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-servlets-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..93e3e42
--- /dev/null
+++ b/solr/licenses/jetty-servlets-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+f7b7f3d6be91f5e1a47b4d3ecaf286652b4d1332

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-util-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-util-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-util-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 7d020a4..0000000
--- a/solr/licenses/jetty-util-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-19ce4203809da37f8ea7a5632704fa71b6f0ccc2

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-util-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-util-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-util-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..7a3c6ad
--- /dev/null
+++ b/solr/licenses/jetty-util-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+d6ec1a1613c7fa72aa6bf5d8c204750afbc3df3b

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-webapp-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-webapp-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-webapp-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index 1fa7b15..0000000
--- a/solr/licenses/jetty-webapp-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5b41166ce279c481216501d45c0d0f4f6da23c0b

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-webapp-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-webapp-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-webapp-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..cb995a6
--- /dev/null
+++ b/solr/licenses/jetty-webapp-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+695278449233cee9bae9eed930a5264b574774f0

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-xml-9.3.20.v20170531.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-xml-9.3.20.v20170531.jar.sha1 b/solr/licenses/jetty-xml-9.3.20.v20170531.jar.sha1
deleted file mode 100644
index a8a3f2b..0000000
--- a/solr/licenses/jetty-xml-9.3.20.v20170531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9f3f158a6a4587c4283561a3a3fc5a187173becf

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/jetty-xml-9.4.8.v20171121.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/jetty-xml-9.4.8.v20171121.jar.sha1 b/solr/licenses/jetty-xml-9.4.8.v20171121.jar.sha1
new file mode 100644
index 0000000..c427daa
--- /dev/null
+++ b/solr/licenses/jetty-xml-9.4.8.v20171121.jar.sha1
@@ -0,0 +1 @@
+b0d6f87f580a9bd7fa9aaf9b7448bf63cf0ac34f

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/licenses/start.jar.sha1
----------------------------------------------------------------------
diff --git a/solr/licenses/start.jar.sha1 b/solr/licenses/start.jar.sha1
index a964f9a..20b1e61 100644
--- a/solr/licenses/start.jar.sha1
+++ b/solr/licenses/start.jar.sha1
@@ -1 +1 @@
-68b040771da53967c7e48f2ffd7c53732687f425
+54904991ec70b400057a7a82c3603035740a6d53

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/JettyWebappTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/JettyWebappTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/JettyWebappTest.java
index 6c13e40..4d76f02 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/JettyWebappTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/JettyWebappTest.java
@@ -37,7 +37,7 @@ import org.eclipse.jetty.server.Connector;
 import org.eclipse.jetty.server.HttpConnectionFactory;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;
-import org.eclipse.jetty.server.session.HashSessionIdManager;
+import org.eclipse.jetty.server.session.DefaultSessionIdManager;
 import org.eclipse.jetty.webapp.WebAppContext;
 import org.junit.Rule;
 import org.junit.rules.RuleChain;
@@ -74,7 +74,7 @@ public class JettyWebappTest extends SolrTestCaseJ4
 
     server = new Server(port);
     // insecure: only use for tests!!!!
-    server.setSessionIdManager(new HashSessionIdManager(new Random(random().nextLong())));
+    server.setSessionIdManager(new DefaultSessionIdManager(server, new Random(random().nextLong())));
     new WebAppContext(server, path, context );
 
     ServerConnector connector = new ServerConnector(server, new HttpConnectionFactory());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6b5c5bf/solr/test-framework/src/java/org/apache/solr/util/SSLTestConfig.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/util/SSLTestConfig.java b/solr/test-framework/src/java/org/apache/solr/util/SSLTestConfig.java
index b60efdb..8268fcd 100644
--- a/solr/test-framework/src/java/org/apache/solr/util/SSLTestConfig.java
+++ b/solr/test-framework/src/java/org/apache/solr/util/SSLTestConfig.java
@@ -16,7 +16,8 @@
  */
 package org.apache.solr.util;
 
-import java.util.Random;
+import javax.net.ssl.SSLContext;
+import java.io.IOException;
 import java.security.KeyManagementException;
 import java.security.KeyStore;
 import java.security.KeyStoreException;
@@ -24,17 +25,15 @@ import java.security.NoSuchAlgorithmException;
 import java.security.SecureRandom;
 import java.security.SecureRandomSpi;
 import java.security.UnrecoverableKeyException;
-
-import javax.net.ssl.SSLContext;
-import java.net.MalformedURLException;
+import java.util.Random;
 
 import org.apache.http.config.Registry;
 import org.apache.http.config.RegistryBuilder;
 import org.apache.http.conn.socket.ConnectionSocketFactory;
 import org.apache.http.conn.socket.PlainConnectionSocketFactory;
 import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
-import org.apache.http.conn.ssl.SSLContexts;
 import org.apache.http.conn.ssl.SSLContextBuilder;
+import org.apache.http.conn.ssl.SSLContexts;
 import org.apache.http.conn.ssl.SSLSocketFactory;
 import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
 import org.apache.solr.client.solrj.embedded.SSLConfig;
@@ -89,7 +88,7 @@ public class SSLTestConfig extends SSLConfig {
     Resource result;
     try {
       result = Resource.newResource(userInput);
-    } catch (MalformedURLException e) {
+    } catch (IOException e) {
       throw new IllegalArgumentException("Can't build " + type + " Resource: " + e.getMessage(), e);
     }
     if (! result.exists()) {


[33/41] lucene-solr:jira/solr-11702: LUCENE-8130: fix NPE from TermStates.toString

Posted by da...@apache.org.
LUCENE-8130: fix NPE from TermStates.toString


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/fc6f3a45
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/fc6f3a45
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/fc6f3a45

Branch: refs/heads/jira/solr-11702
Commit: fc6f3a45f8bdd1518ed49b68fbdc62988b34644b
Parents: 5425353
Author: Mike McCandless <mi...@apache.org>
Authored: Fri Jan 19 08:53:30 2018 -0500
Committer: Mike McCandless <mi...@apache.org>
Committed: Fri Jan 19 08:53:30 2018 -0500

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  2 ++
 .../org/apache/lucene/index/TermStates.java     |  2 +-
 .../org/apache/lucene/index/TestTermStates.java | 36 ++++++++++++++++++++
 3 files changed, 39 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fc6f3a45/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 038285e..6b90215 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -144,6 +144,8 @@ Bug Fixes
 
 * LUCENE-8120: Fix LatLonBoundingBox's toString() method (Martijn van Groningen, Adrien Grand)
 
+* LUCENE-8130: Fix NullPointerException from TermStates.toString() (Mike McCandless)
+
 Other
 
 * LUCENE-8111: IndexOrDocValuesQuery Javadoc references outdated method name.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fc6f3a45/lucene/core/src/java/org/apache/lucene/index/TermStates.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermStates.java b/lucene/core/src/java/org/apache/lucene/index/TermStates.java
index 3282ac8..4bb83fe 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermStates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermStates.java
@@ -224,7 +224,7 @@ public final class TermStates {
     sb.append("TermStates\n");
     for(TermState termState : states) {
       sb.append("  state=");
-      sb.append(termState.toString());
+      sb.append(termState);
       sb.append('\n');
     }
 

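The one-character change works because StringBuilder.append(Object) routes through String.valueOf, which renders a null reference as the literal string "null", whereas calling toString() on a null TermState throws. A standalone illustration:

public class AppendNullDemo {
  public static void main(String[] args) {
    Object termState = null;
    StringBuilder sb = new StringBuilder("  state=");
    sb.append(termState);               // appends "null" - no NPE
    System.out.println(sb);             // prints:   state=null
    // sb.append(termState.toString()); // would throw NullPointerException
  }
}
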
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fc6f3a45/lucene/core/src/test/org/apache/lucene/index/TestTermStates.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermStates.java b/lucene/core/src/test/org/apache/lucene/index/TestTermStates.java
new file mode 100644
index 0000000..a89fe7b
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermStates.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.index;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestTermStates extends LuceneTestCase {
+
+  public void testToStringOnNullTermState() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    w.addDocument(new Document());
+    IndexReader r = w.getReader();
+    TermStates states = TermStates.build(r.getContext(), new Term("foo", "bar"), random().nextBoolean());
+    assertEquals("TermStates\n  state=null\n", states.toString());
+    IOUtils.close(r, w, dir);
+  }
+}


[18/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to its own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
new file mode 100644
index 0000000..eefe903
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
@@ -0,0 +1,280 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.Cmd;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.component.ShardHandler;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+
+public class DeleteReplicaCmd implements Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public DeleteReplicaCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    deleteReplica(clusterState, message, results, null);
+  }
+
+
+  @SuppressWarnings("unchecked")
+  void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
+          throws KeeperException, InterruptedException {
+    log.debug("deleteReplica() : {}", Utils.toJSONString(message));
+    boolean parallel = message.getBool("parallel", false);
+
+    // If a count is specified, the strategy needs to be different
+    if (message.getStr(COUNT_PROP) != null) {
+      deleteReplicaBasedOnCount(clusterState, message, results, onComplete, parallel);
+      return;
+    }
+
+
+    ocmh.checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP);
+    String collectionName = message.getStr(COLLECTION_PROP);
+    String shard = message.getStr(SHARD_ID_PROP);
+    String replicaName = message.getStr(REPLICA_PROP);
+
+    DocCollection coll = clusterState.getCollection(collectionName);
+    Slice slice = coll.getSlice(shard);
+    if (slice == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+              "Invalid shard name : " +  shard + " in collection : " +  collectionName);
+    }
+
+    deleteCore(slice, collectionName, replicaName, message, shard, results, onComplete,  parallel);
+
+  }
+
+
+  /**
+   * Delete replicas based on count for a given collection. If a shard is passed, replicas are
+   * deleted from that shard only; otherwise the given count is deleted from every shard.
+   */
+  void deleteReplicaBasedOnCount(ClusterState clusterState,
+                                 ZkNodeProps message,
+                                 NamedList results,
+                                 Runnable onComplete,
+                                 boolean parallel)
+          throws KeeperException, InterruptedException {
+    ocmh.checkRequired(message, COLLECTION_PROP, COUNT_PROP);
+    int count = Integer.parseInt(message.getStr(COUNT_PROP));
+    String collectionName = message.getStr(COLLECTION_PROP);
+    String shard = message.getStr(SHARD_ID_PROP);
+    DocCollection coll = clusterState.getCollection(collectionName);
+    Slice slice = null;
+    // Validate the shard name if one was passed.
+    if (shard != null) {
+      slice = coll.getSlice(shard);
+      if (slice == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                "Invalid shard name : " +  shard +  " in collection : " + collectionName);
+      }
+    }
+
+    Map<Slice, Set<String>> shardToReplicasMapping = new HashMap<>();
+    if (slice != null) {
+      Set<String> replicasToBeDeleted = pickReplicasTobeDeleted(slice, shard, collectionName, count);
+      shardToReplicasMapping.put(slice,replicasToBeDeleted);
+    } else {
+
+      // No shard was specified, so pick replicas to delete from every shard of the collection.
+      Collection<Slice> allSlices = coll.getSlices();
+      for (Slice individualSlice : allSlices) {
+        Set<String> replicasToBeDeleted = pickReplicasTobeDeleted(individualSlice, individualSlice.getName(), collectionName, count);
+        shardToReplicasMapping.put(individualSlice, replicasToBeDeleted);
+      }
+    }
+
+    for (Slice shardSlice: shardToReplicasMapping.keySet()) {
+      String shardId = shardSlice.getName();
+      Set<String> replicas = shardToReplicasMapping.get(shardSlice);
+      //callDeleteReplica on all replicas
+      for (String replica: replicas) {
+        log.debug("Deleting replica {}  for shard {} based on count {}", replica, shardId, count);
+        deleteCore(shardSlice, collectionName, replica, message, shard, results, onComplete, parallel);
+      }
+      results.add("shard_id", shardId);
+      results.add("replicas_deleted", replicas);
+    }
+
+  }
+
+
+  /**
+   * Pick replicas to be deleted. Avoid picking the leader.
+   */
+  private Set<String> pickReplicasTobeDeleted(Slice slice, String shard, String collectionName, int count) {
+    validateReplicaAvailability(slice, shard, collectionName, count);
+    Collection<Replica> allReplicas = slice.getReplicas();
+    Set<String> replicasToBeRemoved = new HashSet<>();
+    Replica leader = slice.getLeader();
+    for (Replica replica: allReplicas) {
+      if (count == 0) {
+        break;
+      }
+      // Try to avoid picking the leader, to minimize activity on the cluster.
+      if (leader.getCoreName().equals(replica.getCoreName())) {
+        continue;
+      }
+      replicasToBeRemoved.add(replica.getName());
+      count--;
+    }
+    return replicasToBeRemoved;
+  }
+
+  /**
+   * Validate that enough replicas are available for the requested delete count, and error out
+   * if only one replica is available.
+   */
+  private void validateReplicaAvailability(Slice slice, String shard, String collectionName, int count) {
+    // If a specific shard was passed, validate that it has replicas left, and more than just one
+    if (slice != null) {
+      Collection<Replica> allReplicasForShard = slice.getReplicas();
+      if (allReplicasForShard == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No replicas found  in shard/collection: " +
+                shard + "/"  + collectionName);
+      }
+
+
+      if (allReplicasForShard.size() == 1) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There is only one replica available in shard/collection: " +
+                shard + "/" + collectionName + ". Cannot delete that.");
+      }
+
+      if (allReplicasForShard.size() <= count) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There are lesser num replicas requested to be deleted than are available in shard/collection : " +
+                shard + "/"  + collectionName  + " Requested: "  + count + " Available: " + allReplicasForShard.size() + ".");
+      }
+    }
+  }
+
+  void deleteCore(Slice slice, String collectionName, String replicaName, ZkNodeProps message, String shard, NamedList results, Runnable onComplete, boolean parallel) throws KeeperException, InterruptedException {
+
+    Replica replica = slice.getReplica(replicaName);
+    if (replica == null) {
+      ArrayList<String> l = new ArrayList<>();
+      for (Replica r : slice.getReplicas())
+        l.add(r.getName());
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid replica : " +  replicaName + " in shard/collection : " +
+              shard  + "/" + collectionName + " available replicas are " +  StrUtils.join(l, ','));
+    }
+
+    // If users are being safe and only want to remove a replica if it is down, they can specify onlyIfDown=true
+    // on the command.
+    if (Boolean.parseBoolean(message.getStr(OverseerCollectionMessageHandler.ONLY_IF_DOWN)) && replica.getState() != Replica.State.DOWN) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+              "Attempted to remove replica : " + collectionName + "/"  + shard + "/" + replicaName +
+              " with onlyIfDown='true', but state is '" + replica.getStr(ZkStateReader.STATE_PROP) + "'");
+    }
+
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+    String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+    String asyncId = message.getStr(ASYNC);
+    AtomicReference<Map<String, String>> requestMap = new AtomicReference<>(null);
+    if (asyncId != null) {
+      requestMap.set(new HashMap<>(1, 1.0f));
+    }
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.add(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.UNLOAD.toString());
+    params.add(CoreAdminParams.CORE, core);
+
+    params.set(CoreAdminParams.DELETE_INDEX, message.getBool(CoreAdminParams.DELETE_INDEX, true));
+    params.set(CoreAdminParams.DELETE_INSTANCE_DIR, message.getBool(CoreAdminParams.DELETE_INSTANCE_DIR, true));
+    params.set(CoreAdminParams.DELETE_DATA_DIR, message.getBool(CoreAdminParams.DELETE_DATA_DIR, true));
+
+    boolean isLive = ocmh.zkStateReader.getClusterState().getLiveNodes().contains(replica.getNodeName());
+    if (isLive) {
+      ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap.get());
+    }
+
+    Callable<Boolean> callable = () -> {
+      try {
+        if (isLive) {
+          ocmh.processResponses(results, shardHandler, false, null, asyncId, requestMap.get());
+
+          //check if the core unload removed the corenode zk entry
+          if (ocmh.waitForCoreNodeGone(collectionName, shard, replicaName, 5000)) return Boolean.TRUE;
+        }
+
+        // try and ensure core info is removed from cluster state
+        ocmh.deleteCoreNode(collectionName, replicaName, replica, core);
+        if (ocmh.waitForCoreNodeGone(collectionName, shard, replicaName, 30000)) return Boolean.TRUE;
+        return Boolean.FALSE;
+      } catch (Exception e) {
+        results.add("failure", "Could not complete delete " + e.getMessage());
+        throw e;
+      } finally {
+        if (onComplete != null) onComplete.run();
+      }
+    };
+
+    if (!parallel) {
+      try {
+        if (!callable.call())
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+                  "Could not remove replica : " + collectionName + "/" + shard + "/" + replicaName);
+      } catch (InterruptedException | KeeperException e) {
+        throw e;
+      } catch (Exception ex) {
+        throw new SolrException(SolrException.ErrorCode.UNKNOWN, "Error waiting for corenode gone", ex);
+      }
+
+    } else {
+      ocmh.tpe.submit(callable);
+    }
+
+  }
+
+}
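
Both entry points above (delete-by-name and the COUNT_PROP path handled by deleteReplicaBasedOnCount) are reachable through the Collections API. A hedged SolrJ sketch (helper names as in SolrJ 7.x; the collection, shard and replica names are illustrative):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class DeleteReplicaExamples {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:2181").build()) {
      // delete a single replica by name (COLLECTION_PROP / SHARD_ID_PROP / REPLICA_PROP)
      CollectionAdminRequest.deleteReplica("myColl", "shard1", "core_node3").process(client);
      // delete N replicas from one shard (COUNT_PROP path; the leader is avoided)
      CollectionAdminRequest.deleteReplicasFromShard("myColl", "shard1", 2).process(client);
    }
  }
}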

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java
new file mode 100644
index 0000000..2ef2955
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.overseer.OverseerAction;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+public class DeleteShardCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+  private final TimeSource timeSource;
+
+  public DeleteShardCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+    this.timeSource = ocmh.cloudManager.getTimeSource();
+  }
+
+  @Override
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
+    String sliceId = message.getStr(ZkStateReader.SHARD_ID_PROP);
+
+    log.info("Delete shard invoked");
+    Slice slice = clusterState.getCollection(collectionName).getSlice(sliceId);
+    if (slice == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+        "No shard with name " + sliceId + " exists for collection " + collectionName);
+
+    // For now, only allow deletion of inactive slices or custom hashes (range==null).
+    // TODO: Add check for range gaps on Slice deletion
+    final Slice.State state = slice.getState();
+    if (!(slice.getRange() == null || state == Slice.State.INACTIVE || state == Slice.State.RECOVERY
+        || state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY_FAILED)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The slice: " + slice.getName() + " is currently " + state
+          + ". Only non-active (or custom-hashed) slices can be deleted.");
+    }
+
+    if (state == Slice.State.RECOVERY)  {
+      // mark the slice as 'construction' and only then try to delete the cores
+      // see SOLR-9455
+      DistributedQueue inQueue = Overseer.getStateUpdateQueue(ocmh.zkStateReader.getZkClient());
+      Map<String, Object> propMap = new HashMap<>();
+      propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+      propMap.put(sliceId, Slice.State.CONSTRUCTION.toString());
+      propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+      ZkNodeProps m = new ZkNodeProps(propMap);
+      inQueue.offer(Utils.toJSON(m));
+    }
+
+    String asyncId = message.getStr(ASYNC);
+
+    try {
+      List<ZkNodeProps> replicas = getReplicasForSlice(collectionName, slice);
+      CountDownLatch cleanupLatch = new CountDownLatch(replicas.size());
+      for (ZkNodeProps r : replicas) {
+        final ZkNodeProps replica = r.plus(message.getProperties()).plus("parallel", "true").plus(ASYNC, asyncId);
+        log.info("Deleting replica for collection={} shard={} on node={}", replica.getStr(COLLECTION_PROP), replica.getStr(SHARD_ID_PROP), replica.getStr(CoreAdminParams.NODE));
+        NamedList deleteResult = new NamedList();
+        try {
+          ((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, replica, deleteResult, () -> {
+            cleanupLatch.countDown();
+            if (deleteResult.get("failure") != null) {
+              synchronized (results) {
+                results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
+                    " on node=%s", replica.getStr(COLLECTION_PROP), replica.getStr(SHARD_ID_PROP), replica.getStr(NODE_NAME_PROP)));
+              }
+            }
+            SimpleOrderedMap success = (SimpleOrderedMap) deleteResult.get("success");
+            if (success != null) {
+              synchronized (results)  {
+                results.add("success", success);
+              }
+            }
+          });
+        } catch (KeeperException e) {
+          log.warn("Error deleting replica: " + r, e);
+          cleanupLatch.countDown();
+        } catch (Exception e) {
+          log.warn("Error deleting replica: " + r, e);
+          cleanupLatch.countDown();
+          throw e;
+        }
+      }
+      log.debug("Waiting for delete shard action to complete");
+      cleanupLatch.await(5, TimeUnit.MINUTES);
+
+      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETESHARD.toLower(), ZkStateReader.COLLECTION_PROP,
+          collectionName, ZkStateReader.SHARD_ID_PROP, sliceId);
+      ZkStateReader zkStateReader = ocmh.zkStateReader;
+      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
+
+      // wait for a while until we don't see the shard
+      TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+      boolean removed = false;
+      while (!timeout.hasTimedOut()) {
+        timeout.sleep(100);
+        DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
+        removed = collection.getSlice(sliceId) == null;
+        if (removed) {
+          timeout.sleep(100); // wait a bit more so other readers are likely to see the removal on return
+          break;
+        }
+      }
+      if (!removed) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+            "Could not fully remove collection: " + collectionName + " shard: " + sliceId);
+      }
+
+      log.info("Successfully deleted collection: " + collectionName + ", shard: " + sliceId);
+    } catch (SolrException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+          "Error executing delete operation for collection: " + collectionName + " shard: " + sliceId, e);
+    }
+  }
+
+  private List<ZkNodeProps> getReplicasForSlice(String collectionName, Slice slice) {
+    List<ZkNodeProps> sourceReplicas = new ArrayList<>();
+    for (Replica replica : slice.getReplicas()) {
+      ZkNodeProps props = new ZkNodeProps(
+          COLLECTION_PROP, collectionName,
+          SHARD_ID_PROP, slice.getName(),
+          ZkStateReader.CORE_NAME_PROP, replica.getCoreName(),
+          ZkStateReader.REPLICA_PROP, replica.getName(),
+          CoreAdminParams.NODE, replica.getNodeName());
+      sourceReplicas.add(props);
+    }
+    return sourceReplicas;
+  }
+}
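
The guard at the top of call() reads more easily extracted into a predicate. This sketch mirrors it (assuming the fixed grouping above, i.e. RECOVERY_FAILED belongs with the other deletable, non-active states):

import org.apache.solr.common.cloud.Slice;

final class DeleteShardGuard {
  // A slice may be deleted when it is custom-hashed (range == null)
  // or in any non-active state.
  static boolean deletable(Slice slice) {
    Slice.State state = slice.getState();
    return slice.getRange() == null
        || state == Slice.State.INACTIVE
        || state == Slice.State.RECOVERY
        || state == Slice.State.RECOVERY_FAILED
        || state == Slice.State.CONSTRUCTION;
  }
}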

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java
new file mode 100644
index 0000000..cf0a234
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.Replica.State;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
+import org.apache.solr.core.snapshots.SolrSnapshotManager;
+import org.apache.solr.handler.component.ShardHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the functionality of deleting a collection level snapshot.
+ */
+public class DeleteSnapshotCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public DeleteSnapshotCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    String collectionName = message.getStr(COLLECTION_PROP);
+    String commitName = message.getStr(CoreAdminParams.COMMIT_NAME);
+    String asyncId = message.getStr(ASYNC);
+    Map<String, String> requestMap = new HashMap<>();
+    NamedList shardRequestResults = new NamedList();
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+    SolrZkClient zkClient = ocmh.zkStateReader.getZkClient();
+
+    Optional<CollectionSnapshotMetaData> meta = SolrSnapshotManager.getCollectionLevelSnapshot(zkClient, collectionName, commitName);
+    if (!meta.isPresent()) { // Snapshot not found. Nothing to do.
+      return;
+    }
+
+    log.info("Deleting a snapshot for collection={} with commitName={}", collectionName, commitName);
+
+    Set<String> existingCores = new HashSet<>();
+    for (Slice s : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
+      for (Replica r : s.getReplicas()) {
+        existingCores.add(r.getCoreName());
+      }
+    }
+
+    Set<String> coresWithSnapshot = new HashSet<>();
+    for (CoreSnapshotMetaData m : meta.get().getReplicaSnapshots()) {
+      if (existingCores.contains(m.getCoreName())) {
+        coresWithSnapshot.add(m.getCoreName());
+      }
+    }
+
+    log.info("Existing cores with snapshot for collection={} are {}", collectionName, existingCores);
+    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
+      for (Replica replica : slice.getReplicas()) {
+        if (replica.getState() == State.DOWN) {
+          continue; // Since replica is down - no point sending a request.
+        }
+
+        // Note - when a snapshot is found in the in_progress state, it is the result of an overseer
+        // failure while handling the snapshot creation. Since we don't know the exact set of
+        // replicas to contact at this point, we try on all replicas.
+        if (meta.get().getStatus() == SnapshotStatus.InProgress || coresWithSnapshot.contains(replica.getCoreName())) {
+          String coreName = replica.getStr(CORE_NAME_PROP);
+
+          ModifiableSolrParams params = new ModifiableSolrParams();
+          params.set(CoreAdminParams.ACTION, CoreAdminAction.DELETESNAPSHOT.toString());
+          params.set(NAME, slice.getName());
+          params.set(CORE_NAME_PROP, coreName);
+          params.set(CoreAdminParams.COMMIT_NAME, commitName);
+
+          log.info("Sending deletesnapshot request to core={} with commitName={}", coreName, commitName);
+          ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
+        }
+      }
+    }
+
+    ocmh.processResponses(shardRequestResults, shardHandler, false, null, asyncId, requestMap);
+    NamedList success = (NamedList) shardRequestResults.get("success");
+    if (success != null) {
+      for (int i = 0; i < success.size(); i++) {
+        NamedList resp = (NamedList)success.getVal(i);
+        // Unfortunately async processing logic doesn't provide the "core" name automatically.
+        String coreName = (String)resp.get("core");
+        coresWithSnapshot.remove(coreName);
+      }
+    }
+
+    if (!coresWithSnapshot.isEmpty()) { // One or more failures.
+      log.warn("Failed to delete a snapshot for collection {} with commitName = {}. Snapshot could not be deleted for following cores {}",
+          collectionName, commitName, coresWithSnapshot);
+
+      List<CoreSnapshotMetaData> replicasWithSnapshot = new ArrayList<>();
+      for (CoreSnapshotMetaData m : meta.get().getReplicaSnapshots()) {
+        if (coresWithSnapshot.contains(m.getCoreName())) {
+          replicasWithSnapshot.add(m);
+        }
+      }
+
+      // Update the ZK meta-data to include only cores with the snapshot. This will enable users to figure out
+      // which cores still contain the named snapshot.
+      CollectionSnapshotMetaData newResult = new CollectionSnapshotMetaData(meta.get().getName(), SnapshotStatus.Failed,
+          meta.get().getCreationDate(), replicasWithSnapshot);
+      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, newResult);
+      log.info("Saved snapshot information for collection={} with commitName={} in Zookeeper as follows", collectionName, commitName,
+          Utils.toJSON(newResult));
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to delete snapshot on cores " + coresWithSnapshot);
+
+    } else {
+      // Delete the ZK path so that we eliminate the references of this snapshot from collection level meta-data.
+      SolrSnapshotManager.deleteCollectionLevelSnapshot(zkClient, collectionName, commitName);
+      log.info("Deleted Zookeeper snapshot metdata for collection={} with commitName={}", collectionName, commitName);
+      log.info("Successfully deleted snapshot for collection={} with commitName={}", collectionName, commitName);
+    }
+  }
+}
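
From the client side this command is reached via the DELETESNAPSHOT collection action. A hedged SolrJ sketch (helper name as in SolrJ 7.x; the collection and snapshot names are illustrative):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class DeleteSnapshotExample {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:2181").build()) {
      // Overseer path above: look up the snapshot metadata in ZK, send a core-level
      // DELETESNAPSHOT to each live replica, then prune or delete the ZK metadata.
      CollectionAdminRequest.deleteSnapshot("myColl", "mySnapshot").process(client);
    }
  }
}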

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/LeaderRecoveryWatcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/LeaderRecoveryWatcher.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/LeaderRecoveryWatcher.java
new file mode 100644
index 0000000..a80fdc0
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/LeaderRecoveryWatcher.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.Set;
+
+import org.apache.solr.common.SolrCloseableLatch;
+import org.apache.solr.common.cloud.CollectionStateWatcher;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
+
+/**
+ * We use this watcher to wait for any eligible replica in a shard to become active so that it can become a leader.
+ */
+public class LeaderRecoveryWatcher implements CollectionStateWatcher {
+  String collectionId;
+  String shardId;
+  String replicaId;
+  String targetCore;
+  SolrCloseableLatch latch;
+
+  /**
+   * Watch for recovery of a replica
+   *
+   * @param collectionId   collection name
+   * @param shardId        shard id
+   * @param replicaId      source replica name (coreNodeName)
+   * @param targetCore     specific target core name - if null then any active replica will do
+   * @param latch          latch to count down when recovered
+   */
+  LeaderRecoveryWatcher(String collectionId, String shardId, String replicaId, String targetCore, SolrCloseableLatch latch) {
+    this.collectionId = collectionId;
+    this.shardId = shardId;
+    this.replicaId = replicaId;
+    this.targetCore = targetCore;
+    this.latch = latch;
+  }
+
+  @Override
+  public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
+    if (collectionState == null) { // collection has been deleted - don't wait
+      latch.countDown();
+      return true;
+    }
+    Slice slice = collectionState.getSlice(shardId);
+    if (slice == null) { // shard has been removed - don't wait
+      latch.countDown();
+      return true;
+    }
+    for (Replica replica : slice.getReplicas()) {
+      // check if another replica exists - doesn't have to be the one we're moving
+      // as long as it's active and can become a leader, in which case we don't have to wait
+      // for recovery of specifically the one that we've just added
+      if (!replica.getName().equals(replicaId)) {
+        if (replica.getType().equals(Replica.Type.PULL)) { // not eligible for leader election
+          continue;
+        }
+        // check its state
+        String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+        if (targetCore != null && !targetCore.equals(coreName)) {
+          continue;
+        }
+        if (replica.isActive(liveNodes)) { // recovered - stop waiting
+          latch.countDown();
+          return true;
+        }
+      }
+    }
+    // set the watch again to wait for the new replica to recover
+    return false;
+  }
+}
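
The watcher contract is the interesting part: onStateChanged returning true removes the watch, returning false re-arms it for the next cluster-state change. A self-contained sketch of the same wait-for-an-active-replica pattern using a plain CountDownLatch (the production code uses SolrCloseableLatch so the wait can be aborted on shutdown):

import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.solr.common.cloud.CollectionStateWatcher;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.ZkStateReader;

final class ActiveReplicaWait {
  static void waitForAnyActiveReplica(ZkStateReader reader, String collection, String shard)
      throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(1);
    CollectionStateWatcher watcher = (Set<String> liveNodes, DocCollection state) -> {
      if (state == null || state.getSlice(shard) == null) {
        latch.countDown();
        return true;  // collection or shard gone - stop watching
      }
      boolean anyActive = state.getSlice(shard).getReplicas().stream()
          .anyMatch(r -> r.isActive(liveNodes));
      if (anyActive) {
        latch.countDown();
        return true;  // satisfied - remove the watch
      }
      return false;   // re-arm for the next state change
    };
    reader.registerCollectionStateWatcher(collection, watcher);
    latch.await(30, TimeUnit.SECONDS);
  }
}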

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java
new file mode 100644
index 0000000..4edc363
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java
@@ -0,0 +1,334 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.overseer.OverseerAction;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CompositeIdRouter;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.RoutingRule;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.handler.component.ShardHandlerFactory;
+import org.apache.solr.update.SolrIndexSplitter;
+import org.apache.solr.util.TimeOut;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.common.util.Utils.makeMap;
+
+public class MigrateCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+  private final TimeSource timeSource;
+
+  public MigrateCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+    this.timeSource = ocmh.cloudManager.getTimeSource();
+  }
+
+
+  @Override
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    String sourceCollectionName = message.getStr("collection");
+    String splitKey = message.getStr("split.key");
+    String targetCollectionName = message.getStr("target.collection");
+    int timeout = message.getInt("forward.timeout", 10 * 60) * 1000;
+
+    DocCollection sourceCollection = clusterState.getCollection(sourceCollectionName);
+    if (sourceCollection == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown source collection: " + sourceCollectionName);
+    }
+    DocCollection targetCollection = clusterState.getCollection(targetCollectionName);
+    if (targetCollection == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown target collection: " + sourceCollectionName);
+    }
+    if (!(sourceCollection.getRouter() instanceof CompositeIdRouter)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source collection must use a compositeId router");
+    }
+    if (!(targetCollection.getRouter() instanceof CompositeIdRouter)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target collection must use a compositeId router");
+    }
+    CompositeIdRouter sourceRouter = (CompositeIdRouter) sourceCollection.getRouter();
+    CompositeIdRouter targetRouter = (CompositeIdRouter) targetCollection.getRouter();
+    Collection<Slice> sourceSlices = sourceRouter.getSearchSlicesSingle(splitKey, null, sourceCollection);
+    if (sourceSlices.isEmpty()) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+          "No active slices available in source collection: " + sourceCollection + "for given split.key: " + splitKey);
+    }
+    Collection<Slice> targetSlices = targetRouter.getSearchSlicesSingle(splitKey, null, targetCollection);
+    if (targetSlices.isEmpty()) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+          "No active slices available in target collection: " + targetCollection + "for given split.key: " + splitKey);
+    }
+
+    String asyncId = null;
+    if (message.containsKey(ASYNC) && message.get(ASYNC) != null)
+      asyncId = message.getStr(ASYNC);
+
+    for (Slice sourceSlice : sourceSlices) {
+      for (Slice targetSlice : targetSlices) {
+        log.info("Migrating source shard: {} to target shard: {} for split.key = " + splitKey, sourceSlice, targetSlice);
+        migrateKey(clusterState, sourceCollection, sourceSlice, targetCollection, targetSlice, splitKey,
+            timeout, results, asyncId, message);
+      }
+    }
+  }
+
+  private void migrateKey(ClusterState clusterState, DocCollection sourceCollection, Slice sourceSlice,
+                          DocCollection targetCollection, Slice targetSlice,
+                          String splitKey, int timeout,
+                          NamedList results, String asyncId, ZkNodeProps message) throws Exception {
+    String tempSourceCollectionName = "split_" + sourceSlice.getName() + "_temp_" + targetSlice.getName();
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    if (clusterState.hasCollection(tempSourceCollectionName)) {
+      log.info("Deleting temporary collection: " + tempSourceCollectionName);
+      Map<String, Object> props = makeMap(
+          Overseer.QUEUE_OPERATION, DELETE.toLower(),
+          NAME, tempSourceCollectionName);
+
+      try {
+        ocmh.commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
+        clusterState = zkStateReader.getClusterState();
+      } catch (Exception e) {
+        log.warn("Unable to clean up existing temporary collection: " + tempSourceCollectionName, e);
+      }
+    }
+
+    CompositeIdRouter sourceRouter = (CompositeIdRouter) sourceCollection.getRouter();
+    DocRouter.Range keyHashRange = sourceRouter.keyHashRange(splitKey);
+
+    ShardHandlerFactory shardHandlerFactory = ocmh.shardHandlerFactory;
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+
+    log.info("Hash range for split.key: {} is: {}", splitKey, keyHashRange);
+    // intersect source range, keyHashRange and target range
+    // this is the range that has to be split from source and transferred to target
+    DocRouter.Range splitRange = ocmh.intersect(targetSlice.getRange(), ocmh.intersect(sourceSlice.getRange(), keyHashRange));
+    if (splitRange == null) {
+      log.info("No common hashes between source shard: {} and target shard: {}", sourceSlice.getName(), targetSlice.getName());
+      return;
+    }
+    log.info("Common hash range between source shard: {} and target shard: {} = " + splitRange, sourceSlice.getName(), targetSlice.getName());
+
+    Replica targetLeader = zkStateReader.getLeaderRetry(targetCollection.getName(), targetSlice.getName(), 10000);
+    // For tracking async calls.
+    Map<String, String> requestMap = new HashMap<>();
+
+    log.info("Asking target leader node: " + targetLeader.getNodeName() + " core: "
+        + targetLeader.getStr("core") + " to buffer updates");
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTBUFFERUPDATES.toString());
+    params.set(CoreAdminParams.NAME, targetLeader.getStr("core"));
+
+    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
+
+    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to request node to buffer updates", asyncId, requestMap);
+
+    ZkNodeProps m = new ZkNodeProps(
+        Overseer.QUEUE_OPERATION, OverseerAction.ADDROUTINGRULE.toLower(),
+        COLLECTION_PROP, sourceCollection.getName(),
+        SHARD_ID_PROP, sourceSlice.getName(),
+        "routeKey", SolrIndexSplitter.getRouteKey(splitKey) + "!",
+        "range", splitRange.toString(),
+        "targetCollection", targetCollection.getName(),
+        "expireAt", RoutingRule.makeExpiryAt(timeout));
+    log.info("Adding routing rule: " + m);
+    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
+
+    // wait for a while until we see the new rule
+    log.info("Waiting to see routing rule updated in clusterstate");
+    TimeOut waitUntil = new TimeOut(60, TimeUnit.SECONDS, timeSource);
+    boolean added = false;
+    while (!waitUntil.hasTimedOut()) {
+      waitUntil.sleep(100);
+      sourceCollection = zkStateReader.getClusterState().getCollection(sourceCollection.getName());
+      sourceSlice = sourceCollection.getSlice(sourceSlice.getName());
+      Map<String, RoutingRule> rules = sourceSlice.getRoutingRules();
+      if (rules != null) {
+        RoutingRule rule = rules.get(SolrIndexSplitter.getRouteKey(splitKey) + "!");
+        if (rule != null && rule.getRouteRanges().contains(splitRange)) {
+          added = true;
+          break;
+        }
+      }
+    }
+    if (!added) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not add routing rule: " + m);
+    }
+
+    log.info("Routing rule added successfully");
+
+    // Create temp core on source shard
+    Replica sourceLeader = zkStateReader.getLeaderRetry(sourceCollection.getName(), sourceSlice.getName(), 10000);
+
+    // create a temporary collection with just one node on the shard leader
+    String configName = zkStateReader.readConfigName(sourceCollection.getName());
+    Map<String, Object> props = makeMap(
+        Overseer.QUEUE_OPERATION, CREATE.toLower(),
+        NAME, tempSourceCollectionName,
+        NRT_REPLICAS, 1,
+        OverseerCollectionMessageHandler.NUM_SLICES, 1,
+        OverseerCollectionMessageHandler.COLL_CONF, configName,
+        OverseerCollectionMessageHandler.CREATE_NODE_SET, sourceLeader.getNodeName());
+    if (asyncId != null) {
+      String internalAsyncId = asyncId + Math.abs(System.nanoTime());
+      props.put(ASYNC, internalAsyncId);
+    }
+
+    log.info("Creating temporary collection: " + props);
+    ocmh.commandMap.get(CREATE).call(clusterState, new ZkNodeProps(props), results);
+    // refresh cluster state
+    clusterState = zkStateReader.getClusterState();
+    Slice tempSourceSlice = clusterState.getCollection(tempSourceCollectionName).getSlices().iterator().next();
+    Replica tempSourceLeader = zkStateReader.getLeaderRetry(tempSourceCollectionName, tempSourceSlice.getName(), 120000);
+
+    String tempCollectionReplica1 = tempSourceLeader.getCoreName();
+    String coreNodeName = ocmh.waitForCoreNodeName(tempSourceCollectionName,
+        sourceLeader.getNodeName(), tempCollectionReplica1);
+    // wait for the replicas to be seen as active on temp source leader
+    log.info("Asking source leader to wait for: " + tempCollectionReplica1 + " to be alive on: " + sourceLeader.getNodeName());
+    CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
+    cmd.setCoreName(tempCollectionReplica1);
+    cmd.setNodeName(sourceLeader.getNodeName());
+    cmd.setCoreNodeName(coreNodeName);
+    cmd.setState(Replica.State.ACTIVE);
+    cmd.setCheckLive(true);
+    cmd.setOnlyIfLeader(true);
+    // we don't want this to happen asynchronously
+    ocmh.sendShardRequest(tempSourceLeader.getNodeName(), new ModifiableSolrParams(cmd.getParams()), shardHandler, null, null);
+
+    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create temp collection leader" +
+        " or timed out waiting for it to come up", asyncId, requestMap);
+
+    log.info("Asking source leader to split index");
+    params = new ModifiableSolrParams();
+    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
+    params.set(CoreAdminParams.CORE, sourceLeader.getStr("core"));
+    params.add(CoreAdminParams.TARGET_CORE, tempSourceLeader.getStr("core"));
+    params.set(CoreAdminParams.RANGES, splitRange.toString());
+    params.set("split.key", splitKey);
+
+    String tempNodeName = sourceLeader.getNodeName();
+
+    ocmh.sendShardRequest(tempNodeName, params, shardHandler, asyncId, requestMap);
+    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to invoke SPLIT core admin command", asyncId, requestMap);
+
+    log.info("Creating a replica of temporary collection: {} on the target leader node: {}",
+        tempSourceCollectionName, targetLeader.getNodeName());
+    String tempCollectionReplica2 = Assign.buildSolrCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(),
+        zkStateReader.getClusterState().getCollection(tempSourceCollectionName), tempSourceSlice.getName(), Replica.Type.NRT);
+    props = new HashMap<>();
+    props.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
+    props.put(COLLECTION_PROP, tempSourceCollectionName);
+    props.put(SHARD_ID_PROP, tempSourceSlice.getName());
+    props.put("node", targetLeader.getNodeName());
+    props.put(CoreAdminParams.NAME, tempCollectionReplica2);
+    // copy over property params:
+    for (String key : message.keySet()) {
+      if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
+        props.put(key, message.getStr(key));
+      }
+    }
+    // add async param
+    if (asyncId != null) {
+      props.put(ASYNC, asyncId);
+    }
+    ((AddReplicaCmd)ocmh.commandMap.get(ADDREPLICA)).addReplica(clusterState, new ZkNodeProps(props), results, null);
+
+    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create replica of " +
+        "temporary collection in target leader node.", asyncId, requestMap);
+
+    coreNodeName = ocmh.waitForCoreNodeName(tempSourceCollectionName,
+        targetLeader.getNodeName(), tempCollectionReplica2);
+    // wait for the replicas to be seen as active on temp source leader
+    log.info("Asking temp source leader to wait for: " + tempCollectionReplica2 + " to be alive on: " + targetLeader.getNodeName());
+    cmd = new CoreAdminRequest.WaitForState();
+    cmd.setCoreName(tempSourceLeader.getStr("core"));
+    cmd.setNodeName(targetLeader.getNodeName());
+    cmd.setCoreNodeName(coreNodeName);
+    cmd.setState(Replica.State.ACTIVE);
+    cmd.setCheckLive(true);
+    cmd.setOnlyIfLeader(true);
+    params = new ModifiableSolrParams(cmd.getParams());
+
+    ocmh.sendShardRequest(tempSourceLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
+
+    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create temp collection" +
+        " replica or timed out waiting for them to come up", asyncId, requestMap);
+
+    log.info("Successfully created replica of temp source collection on target leader node");
+
+    log.info("Requesting merge of temp source collection replica to target leader");
+    params = new ModifiableSolrParams();
+    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.MERGEINDEXES.toString());
+    params.set(CoreAdminParams.CORE, targetLeader.getStr("core"));
+    params.set(CoreAdminParams.SRC_CORE, tempCollectionReplica2);
+
+    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
+    String msg = "MIGRATE failed to merge " + tempCollectionReplica2 + " to "
+        + targetLeader.getStr("core") + " on node: " + targetLeader.getNodeName();
+    ocmh.processResponses(results, shardHandler, true, msg, asyncId, requestMap);
+
+    log.info("Asking target leader to apply buffered updates");
+    params = new ModifiableSolrParams();
+    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
+    params.set(CoreAdminParams.NAME, targetLeader.getStr("core"));
+
+    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
+    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to request node to apply buffered updates",
+        asyncId, requestMap);
+
+    try {
+      log.info("Deleting temporary collection: " + tempSourceCollectionName);
+      props = makeMap(
+          Overseer.QUEUE_OPERATION, DELETE.toLower(),
+          NAME, tempSourceCollectionName);
+      ocmh.commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
+    } catch (Exception e) {
+      log.error("Unable to delete temporary collection: " + tempSourceCollectionName
+          + ". Please remove it manually", e);
+    }
+  }
+}
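
End to end, migrateKey() buffers updates on the target leader, adds a routing rule, splits the matching hash range into a temporary collection, replicates that core to the target leader, merges it in, and finally replays the buffered updates. A hedged SolrJ sketch of triggering it (helper names as in SolrJ 7.x; both collections must use the compositeId router, and the names here are illustrative):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class MigrateExample {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:2181").build()) {
      // Moves documents routed by "tenantA!" (e.g. ids like "tenantA!doc1").
      CollectionAdminRequest.migrateData("sourceColl", "targetColl", "tenantA")
          .setForwardTimeout(600) // seconds; becomes the forward.timeout param above
          .process(client);
    }
  }
}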

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
new file mode 100644
index 0000000..f9392b5
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
@@ -0,0 +1,303 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.cloud.ActiveReplicaWatcher;
+import org.apache.solr.common.SolrCloseableLatch;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.update.UpdateLog;
+import org.apache.solr.util.TimeOut;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonAdminParams.IN_PLACE_MOVE;
+import static org.apache.solr.common.params.CommonAdminParams.TIMEOUT;
+import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
+
+public class MoveReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+  private final TimeSource timeSource;
+
+  public MoveReplicaCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+    this.timeSource = ocmh.cloudManager.getTimeSource();
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    moveReplica(ocmh.zkStateReader.getClusterState(), message, results);
+  }
+
+  private void moveReplica(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    log.debug("moveReplica() : {}", Utils.toJSONString(message));
+    ocmh.checkRequired(message, COLLECTION_PROP, CollectionParams.TARGET_NODE);
+    String collection = message.getStr(COLLECTION_PROP);
+    String targetNode = message.getStr(CollectionParams.TARGET_NODE);
+    boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
+    boolean inPlaceMove = message.getBool(IN_PLACE_MOVE, true);
+    int timeout = message.getInt(TIMEOUT, 10 * 60); // 10 minutes
+
+    String async = message.getStr(ASYNC);
+
+    DocCollection coll = clusterState.getCollection(collection);
+    if (coll == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
+    }
+    if (!clusterState.getLiveNodes().contains(targetNode)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target node: " + targetNode + " not in live nodes: " + clusterState.getLiveNodes());
+    }
+    Replica replica = null;
+    if (message.containsKey(REPLICA_PROP)) {
+      String replicaName = message.getStr(REPLICA_PROP);
+      replica = coll.getReplica(replicaName);
+      if (replica == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            "Collection: " + collection + " replica: " + replicaName + " does not exist");
+      }
+    } else {
+      String sourceNode = message.getStr(CollectionParams.SOURCE_NODE, message.getStr(CollectionParams.FROM_NODE));
+      if (sourceNode == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'" + CollectionParams.SOURCE_NODE +
+            " or '" + CollectionParams.FROM_NODE + "' is a required param");
+      }
+      String shardId = message.getStr(SHARD_ID_PROP);
+      if (shardId == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'" + SHARD_ID_PROP + "' is a required param");
+      }
+      Slice slice = clusterState.getCollection(collection).getSlice(shardId);
+      List<Replica> sliceReplicas = new ArrayList<>(slice.getReplicas());
+      Collections.shuffle(sliceReplicas, OverseerCollectionMessageHandler.RANDOM);
+      // pick the first matching replica from the shuffled list, i.e. a single
+      // random replica that lives on the sourceNode
+      for (Replica r : sliceReplicas) {
+        if (r.getNodeName().equals(sourceNode)) {
+          replica = r;
+          break;
+        }
+      }
+      if (replica == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            "Collection: " + collection + " node: " + sourceNode + " does not have any replica belonging to shard: " + shardId);
+      }
+    }
+
+    log.info("Replica will be moved to node {}: {}", targetNode, replica);
+    Slice slice = null;
+    for (Slice s : coll.getSlices()) {
+      if (s.getReplicas().contains(replica)) {
+        slice = s;
+      }
+    }
+    assert slice != null;
+    Object dataDir = replica.get("dataDir");
+    boolean isSharedFS = replica.getBool(ZkStateReader.SHARED_STORAGE_PROP, false) && dataDir != null;
+
+    if (isSharedFS && inPlaceMove) {
+      log.debug("-- moveHdfsReplica");
+      moveHdfsReplica(clusterState, results, dataDir.toString(), targetNode, async, coll, replica, slice, timeout, waitForFinalState);
+    } else {
+      log.debug("-- moveNormalReplica (inPlaceMove=" + inPlaceMove + ", isSharedFS=" + isSharedFS);
+      moveNormalReplica(clusterState, results, targetNode, async, coll, replica, slice, timeout, waitForFinalState);
+    }
+  }
+
+  private void moveHdfsReplica(ClusterState clusterState, NamedList results, String dataDir, String targetNode, String async,
+                                 DocCollection coll, Replica replica, Slice slice, int timeout, boolean waitForFinalState) throws Exception {
+    String skipCreateReplicaInClusterState = "true";
+    if (clusterState.getLiveNodes().contains(replica.getNodeName())) {
+      skipCreateReplicaInClusterState = "false";
+      ZkNodeProps removeReplicasProps = new ZkNodeProps(
+          COLLECTION_PROP, coll.getName(),
+          SHARD_ID_PROP, slice.getName(),
+          REPLICA_PROP, replica.getName()
+      );
+      removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_DATA_DIR, false);
+      removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_INDEX, false);
+      if (async != null) removeReplicasProps.getProperties().put(ASYNC, async);
+      NamedList deleteResult = new NamedList();
+      ocmh.deleteReplica(clusterState, removeReplicasProps, deleteResult, null);
+      if (deleteResult.get("failure") != null) {
+        String errorString = String.format(Locale.ROOT, "Failed to cleanup replica collection=%s shard=%s name=%s, failure=%s",
+            coll.getName(), slice.getName(), replica.getName(), deleteResult.get("failure"));
+        log.warn(errorString);
+        results.add("failure", errorString);
+        return;
+      }
+
+      TimeOut timeOut = new TimeOut(20L, TimeUnit.SECONDS, timeSource);
+      while (!timeOut.hasTimedOut()) {
+        coll = ocmh.zkStateReader.getClusterState().getCollection(coll.getName());
+        if (coll.getReplica(replica.getName()) != null) {
+          timeOut.sleep(100);
+        } else {
+          break;
+        }
+      }
+      if (timeOut.hasTimedOut()) {
+        results.add("failure", "Still see deleted replica in clusterstate!");
+        return;
+      }
+
+    }
+
+    String ulogDir = replica.getStr(CoreAdminParams.ULOG_DIR);
+    ZkNodeProps addReplicasProps = new ZkNodeProps(
+        COLLECTION_PROP, coll.getName(),
+        SHARD_ID_PROP, slice.getName(),
+        CoreAdminParams.NODE, targetNode,
+        CoreAdminParams.CORE_NODE_NAME, replica.getName(),
+        CoreAdminParams.NAME, replica.getCoreName(),
+        WAIT_FOR_FINAL_STATE, String.valueOf(waitForFinalState),
+        SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, skipCreateReplicaInClusterState,
+        CoreAdminParams.ULOG_DIR, ulogDir.substring(0, ulogDir.lastIndexOf(UpdateLog.TLOG_NAME)),
+        CoreAdminParams.DATA_DIR, dataDir);
+    if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
+    NamedList addResult = new NamedList();
+    try {
+      ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, addResult, null);
+    } catch (Exception e) {
+      // fatal error - try rolling back
+      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
+          " on node=%s, failure=%s", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
+      results.add("failure", errorString);
+      log.warn("Error adding replica " + addReplicasProps + " - trying to roll back...", e);
+      addReplicasProps = addReplicasProps.plus(CoreAdminParams.NODE, replica.getNodeName());
+      NamedList rollback = new NamedList();
+      ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, rollback, null);
+      if (rollback.get("failure") != null) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
+            + ", collection may be inconsistent: " + rollback.get("failure"));
+      }
+      return;
+    }
+    if (addResult.get("failure") != null) {
+      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
+          " on node=%s, failure=%s", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
+      log.warn(errorString);
+      results.add("failure", errorString);
+      log.debug("--- trying to roll back...");
+      // try to roll back
+      addReplicasProps = addReplicasProps.plus(CoreAdminParams.NODE, replica.getNodeName());
+      NamedList rollback = new NamedList();
+      try {
+        ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, rollback, null);
+      } catch (Exception e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
+            + ", collection may be inconsistent!", e);
+      }
+      if (rollback.get("failure") != null) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
+            + ", collection may be inconsistent! Failure: " + rollback.get("failure"));
+      }
+      return;
+    } else {
+      String successString = String.format(Locale.ROOT, "MOVEREPLICA action completed successfully, moved replica=%s at node=%s " +
+          "to replica=%s at node=%s", replica.getCoreName(), replica.getNodeName(), replica.getCoreName(), targetNode);
+      results.add("success", successString);
+    }
+  }
+
+  private void moveNormalReplica(ClusterState clusterState, NamedList results, String targetNode, String async,
+                                 DocCollection coll, Replica replica, Slice slice, int timeout, boolean waitForFinalState) throws Exception {
+    String newCoreName = Assign.buildSolrCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(), coll, slice.getName(), replica.getType());
+    ZkNodeProps addReplicasProps = new ZkNodeProps(
+        COLLECTION_PROP, coll.getName(),
+        SHARD_ID_PROP, slice.getName(),
+        CoreAdminParams.NODE, targetNode,
+        CoreAdminParams.NAME, newCoreName);
+    if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
+    NamedList addResult = new NamedList();
+    SolrCloseableLatch countDownLatch = new SolrCloseableLatch(1, ocmh);
+    ActiveReplicaWatcher watcher = null;
+    ZkNodeProps props = ocmh.addReplica(clusterState, addReplicasProps, addResult, null);
+    log.debug("props " + props);
+    if (replica.equals(slice.getLeader()) || waitForFinalState) {
+      watcher = new ActiveReplicaWatcher(coll.getName(), null, Collections.singletonList(newCoreName), countDownLatch);
+      log.debug("-- registered watcher " + watcher);
+      ocmh.zkStateReader.registerCollectionStateWatcher(coll.getName(), watcher);
+    }
+    if (addResult.get("failure") != null) {
+      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
+          " on node=%s, failure=", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
+      log.warn(errorString);
+      results.add("failure", errorString);
+      if (watcher != null) { // unregister
+        ocmh.zkStateReader.removeCollectionStateWatcher(coll.getName(), watcher);
+      }
+      return;
+    }
+    // wait for the other replica to be active if the source replica was a leader
+    if (watcher != null) {
+      try {
+        log.debug("Waiting for leader's replica to recover.");
+        if (!countDownLatch.await(timeout, TimeUnit.SECONDS)) {
+          String errorString = String.format(Locale.ROOT, "Timed out waiting for leader's replica to recover, collection=%s shard=%s" +
+              " on node=%s", coll.getName(), slice.getName(), targetNode);
+          log.warn(errorString);
+          results.add("failure", errorString);
+          return;
+        } else {
+          log.debug("Replica " + watcher.getActiveReplicas() + " is active - deleting the source...");
+        }
+      } finally {
+        ocmh.zkStateReader.removeCollectionStateWatcher(coll.getName(), watcher);
+      }
+    }
+
+    ZkNodeProps removeReplicasProps = new ZkNodeProps(
+        COLLECTION_PROP, coll.getName(),
+        SHARD_ID_PROP, slice.getName(),
+        REPLICA_PROP, replica.getName());
+    if (async != null) removeReplicasProps.getProperties().put(ASYNC, async);
+    NamedList deleteResult = new NamedList();
+    ocmh.deleteReplica(clusterState, removeReplicasProps, deleteResult, null);
+    if (deleteResult.get("failure") != null) {
+      String errorString = String.format(Locale.ROOT, "Failed to cleanup replica collection=%s shard=%s name=%s, failure=%s",
+          coll.getName(), slice.getName(), replica.getName(), deleteResult.get("failure"));
+      log.warn(errorString);
+      results.add("failure", errorString);
+    } else {
+      String successString = String.format(Locale.ROOT, "MOVEREPLICA action completed successfully, moved replica=%s at node=%s " +
+          "to replica=%s at node=%s", replica.getCoreName(), replica.getNodeName(), newCoreName, targetNode);
+      results.add("success", successString);
+    }
+  }
+}
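
A minimal SolrJ sketch of driving the command above (assuming SolrJ 7.x's
CollectionAdminRequest.MoveReplica; the collection, replica and node names
are illustrative placeholders):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class MoveReplicaExample {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:9983").build()) {
      // Ask the overseer to move core_node4 of collection1 to the target node;
      // MoveReplicaCmd then picks moveHdfsReplica() or moveNormalReplica().
      new CollectionAdminRequest.MoveReplica(
          "collection1", "core_node4", "127.0.0.1:8984_solr")
          .process(client);
    }
  }
}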


[28/41] lucene-solr:jira/solr-11702: SOLR-11592: Add OpenNLP language detection to the langid contrib

Posted by da...@apache.org.
SOLR-11592: Add OpenNLP language detection to the langid contrib


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/03095ce4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/03095ce4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/03095ce4

Branch: refs/heads/jira/solr-11702
Commit: 03095ce4d20060a1c63570d8a5214e9858693080
Parents: 5e2ef5e
Author: Steve Rowe <sa...@apache.org>
Authored: Wed Jan 17 11:29:17 2018 -0500
Committer: Steve Rowe <sa...@apache.org>
Committed: Wed Jan 17 11:29:17 2018 -0500

----------------------------------------------------------------------
 dev-tools/idea/solr/contrib/langid/langid.iml   |   1 +
 solr/CHANGES.txt                                |   2 +
 solr/contrib/langid/README.txt                  |   3 +-
 solr/contrib/langid/build.xml                   |  60 +++++++++
 solr/contrib/langid/ivy.xml                     |   1 +
 .../LanguageIdentifierUpdateProcessor.java      |  64 +++++++++
 .../OpenNLPLangDetectUpdateProcessor.java       |  80 ++++++++++++
 ...OpenNLPLangDetectUpdateProcessorFactory.java | 130 +++++++++++++++++++
 .../TikaLanguageIdentifierUpdateProcessor.java  |  65 ----------
 .../opennlp-langdetect.eng-swe-spa-rus-deu.bin  | Bin 0 -> 17702 bytes
 .../conf/solrconfig-languageidentifier.xml      |  25 +++-
 .../opennlp.langdetect.trainer.params.txt       |  17 +++
 ...dentifierUpdateProcessorFactoryTestCase.java |   6 +-
 ...NLPLangDetectUpdateProcessorFactoryTest.java |  66 ++++++++++
 .../detecting-languages-during-indexing.adoc    |  29 ++++-
 15 files changed, 476 insertions(+), 73 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/dev-tools/idea/solr/contrib/langid/langid.iml
----------------------------------------------------------------------
diff --git a/dev-tools/idea/solr/contrib/langid/langid.iml b/dev-tools/idea/solr/contrib/langid/langid.iml
index 28223bd..afeb125 100644
--- a/dev-tools/idea/solr/contrib/langid/langid.iml
+++ b/dev-tools/idea/solr/contrib/langid/langid.iml
@@ -31,5 +31,6 @@
     <orderEntry type="module" module-name="lucene-core" />
     <orderEntry type="module" module-name="solr-core" />
     <orderEntry type="module" module-name="solrj" />
+    <orderEntry type="module" module-name="analysis-common" />
   </component>
 </module>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 8376794..2179602 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -94,6 +94,8 @@ New Features
 
 * SOLR-11810: Upgrade Jetty to 9.4.8.v20171121 (Varun Thacker, Erick Erickson)
 
+* SOLR-11592: Add OpenNLP language detection to the langid contrib. (Koji, Steve Rowe)
+
 Bug Fixes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/contrib/langid/README.txt
----------------------------------------------------------------------
diff --git a/solr/contrib/langid/README.txt b/solr/contrib/langid/README.txt
index 2e6cd54..68a2ea5 100644
--- a/solr/contrib/langid/README.txt
+++ b/solr/contrib/langid/README.txt
@@ -18,4 +18,5 @@ Please refer to the module documentation at http://wiki.apache.org/solr/Language
 Dependencies
 ------------
 The Tika detector depends on Tika Core (which is part of extraction contrib)
-The Langdetect detector depends on LangDetect library
\ No newline at end of file
+The Langdetect detector depends on LangDetect library
+The OpenNLP detector depends on OpenNLP tools and requires a previously trained user-supplied model

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/contrib/langid/build.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/langid/build.xml b/solr/contrib/langid/build.xml
index 8341a76..aca7aeb 100644
--- a/solr/contrib/langid/build.xml
+++ b/solr/contrib/langid/build.xml
@@ -25,6 +25,17 @@
 
   <import file="../contrib-build.xml"/>
 
+  <property name="test.model.dir" location="${tests.userdir}/langid/solr/collection1/conf"/>
+  <property name="test.leipzig.folder.link" value="http://pcai056.informatik.uni-leipzig.de/downloads/corpora"/>
+  <property name="test.build.models.dir" location="${build.dir}/build-test-models"/>
+  <property name="test.build.models.data.dir" location="${test.build.models.dir}/data"/>
+  <property name="test.build.models.sentences.dir" location="${test.build.models.dir}/train"/>
+  <property name="test.opennlp.model" value="opennlp-langdetect.eng-swe-spa-rus-deu.bin"/>
+
+  <path id="opennlp.jars">
+    <fileset dir="lib" includes="opennlp*.jar"/>
+  </path>
+  
   <path id="classpath">
     <fileset dir="../extraction/lib" excludes="${common.classpath.excludes}"/>
     <fileset dir="lib" excludes="${common.classpath.excludes}"/>
@@ -39,4 +50,53 @@
   </target>
 
   <target name="compile-core" depends="resolve-extraction-libs,solr-contrib-build.compile-core"/>
+
+  <!--
+  Create test models using data for five languages from the Leipzig corpora.
+  See http://opennlp.apache.org/docs/1.8.3/manual/opennlp.html#tools.langdetect.training.leipzig
+  -->
+  <target name="train-test-models" description="Train small test models for unit tests" depends="resolve">
+    <download-leipzig language.code="eng"/> 
+    <download-leipzig language.code="swe"/>
+    <download-leipzig language.code="spa"/>
+    <download-leipzig language.code="rus"/>
+    <download-leipzig language.code="deu"/>
+
+    <echo message="Train OpenNLP test model over data from the Leipzig corpora"/>
+    <java classname="opennlp.tools.cmdline.CLI" classpathref="opennlp.jars" fork="true" failonerror="true">
+      <arg value="LanguageDetectorTrainer.leipzig"/>
+
+      <arg value="-model"/>
+      <arg value="${test.model.dir}/${test.opennlp.model}"/>
+
+      <arg value="-params"/>
+      <arg value="${tests.userdir}/opennlp.langdetect.trainer.params.txt"/>
+
+      <arg value="-sentencesDir"/> 
+      <arg value="${test.build.models.sentences.dir}"/>
+      
+      <arg value="-sentencesPerSample"/>
+      <arg value="3"/>  
+      
+      <arg value="-samplesPerLanguage"/>
+      <arg value="10000"/>
+    </java>
+  </target>
+
+  <macrodef name="download-leipzig">
+    <attribute name="language.code"/>
+    <attribute name="leipzig.tarball" default="@{language.code}_news_2007_30K.tar.gz"/>
+    <sequential>
+      <mkdir dir="${test.build.models.data.dir}"/>
+      <get src="${test.leipzig.folder.link}/@{leipzig.tarball}" dest="${test.build.models.data.dir}"/>
+      <untar compression="gzip" src="${test.build.models.data.dir}/@{leipzig.tarball}"
+             dest="${test.build.models.sentences.dir}">
+        <patternset>
+          <include name="*-sentences.txt"/>
+        </patternset>
+      </untar>
+    </sequential>
+  </macrodef>
+
+  <target name="regenerate" depends="train-test-models"/>
 </project>
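
A minimal sketch of the same training step done programmatically with the
OpenNLP 1.8.x API rather than the CLI invoked by the target above; the two
in-memory samples stand in for the Leipzig sentence files, and the output
file name is illustrative:

import java.io.FileOutputStream;
import java.util.Arrays;

import opennlp.tools.langdetect.Language;
import opennlp.tools.langdetect.LanguageDetectorFactory;
import opennlp.tools.langdetect.LanguageDetectorME;
import opennlp.tools.langdetect.LanguageDetectorModel;
import opennlp.tools.langdetect.LanguageSample;
import opennlp.tools.util.CollectionObjectStream;
import opennlp.tools.util.TrainingParameters;

public class TrainTestModel {
  public static void main(String[] args) throws Exception {
    // Same settings as opennlp.langdetect.trainer.params.txt below
    TrainingParameters params = new TrainingParameters();
    params.put(TrainingParameters.ALGORITHM_PARAM, "PERCEPTRON");
    params.put(TrainingParameters.CUTOFF_PARAM, "0");
    LanguageDetectorModel model = LanguageDetectorME.train(
        new CollectionObjectStream<>(Arrays.asList(
            new LanguageSample(new Language("eng"), "this is an english sentence"),
            new LanguageSample(new Language("deu"), "dies ist ein deutscher satz"))),
        params, new LanguageDetectorFactory());
    try (FileOutputStream out = new FileOutputStream("langdetect-test.bin")) {
      model.serialize(out);
    }
  }
}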

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/contrib/langid/ivy.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/langid/ivy.xml b/solr/contrib/langid/ivy.xml
index 88dc628..04c6b25 100644
--- a/solr/contrib/langid/ivy.xml
+++ b/solr/contrib/langid/ivy.xml
@@ -25,6 +25,7 @@
   <dependencies>
     <dependency org="com.cybozu.labs" name="langdetect" rev="${/com.cybozu.labs/langdetect}" conf="compile"/>
     <dependency org="net.arnx" name="jsonic" rev="${/net.arnx/jsonic}" conf="compile"/>
+    <dependency org="org.apache.opennlp" name="opennlp-tools" rev="${/org.apache.opennlp/opennlp-tools}" conf="compile"/>
 
     <exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/>
   </dependencies>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java
----------------------------------------------------------------------
diff --git a/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java b/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java
index a8d6523..3679905 100644
--- a/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java
+++ b/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java
@@ -33,6 +33,7 @@ import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -399,4 +400,67 @@ public abstract class LanguageIdentifierUpdateProcessor extends UpdateRequestPro
     this.enabled = enabled;
   }
 
+
+
+  /**
+   * Concatenates content from multiple fields
+   */
+  protected String concatFields(SolrInputDocument doc) {
+    StringBuilder sb = new StringBuilder(getExpectedSize(doc, inputFields));
+    for (String fieldName : inputFields) {
+      log.debug("Appending field " + fieldName);
+      if (doc.containsKey(fieldName)) {
+        Collection<Object> fieldValues = doc.getFieldValues(fieldName);
+        if (fieldValues != null) {
+          for (Object content : fieldValues) {
+            if (content instanceof String) {
+              String stringContent = (String) content;
+              if (stringContent.length() > maxFieldValueChars) {
+                sb.append(stringContent.substring(0, maxFieldValueChars));
+              } else {
+                sb.append(stringContent);
+              }
+              sb.append(" ");
+              if (sb.length() > maxTotalChars) {
+                sb.setLength(maxTotalChars);
+                break;
+              }
+            } else {
+              log.warn("Field " + fieldName + " not a String value, not including in detection");
+            }
+          }
+        }
+      }
+    }
+    return sb.toString();
+  }
+
+  /**
+   * Calculate expected string size.
+   *
+   * @param doc           solr input document
+   * @param fields        fields to select
+   * @return expected size of string value
+   */
+  private int getExpectedSize(SolrInputDocument doc, String[] fields) {
+    int docSize = 0;
+    for (String field : fields) {
+      if (doc.containsKey(field)) {
+        Collection<Object> contents = doc.getFieldValues(field);
+        if (contents != null) {
+          for (Object content : contents) {
+            if (content instanceof String) {
+              docSize += Math.min(((String) content).length(), maxFieldValueChars);
+            }
+          }
+
+          if (docSize > maxTotalChars) {
+            docSize = maxTotalChars;
+            break;
+          }
+        }
+      }
+    }
+    return docSize;
+  }
 }
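
The truncation rule implemented by concatFields()/getExpectedSize() above can
be hard to see inside the nested loops; a standalone sketch of the same rule,
with illustrative limits and values:

public class ConcatRuleSketch {
  public static void main(String[] args) {
    int maxFieldValueChars = 5;   // per-value cap
    int maxTotalChars = 12;       // cap on the whole concatenation
    String[] values = {"abcdefgh", "ijk", "lmnopqr"};
    StringBuilder sb = new StringBuilder();
    for (String v : values) {
      // each value is truncated to maxFieldValueChars, then a space is added
      sb.append(v.length() > maxFieldValueChars ? v.substring(0, maxFieldValueChars) : v);
      sb.append(" ");
      if (sb.length() > maxTotalChars) {
        sb.setLength(maxTotalChars);  // hard cap on the total size
        break;
      }
    }
    System.out.println("'" + sb + "'"); // prints 'abcde ijk lm'
  }
}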

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/contrib/langid/src/java/org/apache/solr/update/processor/OpenNLPLangDetectUpdateProcessor.java
----------------------------------------------------------------------
diff --git a/solr/contrib/langid/src/java/org/apache/solr/update/processor/OpenNLPLangDetectUpdateProcessor.java b/solr/contrib/langid/src/java/org/apache/solr/update/processor/OpenNLPLangDetectUpdateProcessor.java
new file mode 100644
index 0000000..83f4fe4
--- /dev/null
+++ b/solr/contrib/langid/src/java/org/apache/solr/update/processor/OpenNLPLangDetectUpdateProcessor.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.update.processor;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import opennlp.tools.langdetect.Language;
+import opennlp.tools.langdetect.LanguageDetectorME;
+import opennlp.tools.langdetect.LanguageDetectorModel;
+
+/**
+ * Identifies the language of a set of input fields using <a href="https://opennlp.apache.org/">Apache OpenNLP</a>.
+ * <p>
+ * See "Language Detector" section of
+ * <a href="https://opennlp.apache.org/docs/1.8.3/manual/opennlp.html">https://opennlp.apache.org/docs/1.8.3/manual/opennlp.html</a>
+ */
+public class OpenNLPLangDetectUpdateProcessor extends LanguageIdentifierUpdateProcessor {
+
+  private final LanguageDetectorModel model;
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  /** Maps ISO 639-3 (3-letter language code) to ISO 639-1 (2-letter language code) */
+  private static final Map<String,String> ISO639_MAP = make_ISO639_map();
+  
+  public OpenNLPLangDetectUpdateProcessor(SolrQueryRequest req, SolrQueryResponse rsp,
+      UpdateRequestProcessor next, LanguageDetectorModel model) {
+    super(req, rsp, next);
+    this.model = model;
+  }
+
+  @Override
+  protected List<DetectedLanguage> detectLanguage(SolrInputDocument doc) {
+    List<DetectedLanguage> languages = new ArrayList<>();
+    String content = concatFields(doc);
+    if (content.length() != 0) {
+      LanguageDetectorME ldme = new LanguageDetectorME(model);
+      Language[] langs = ldme.predictLanguages(content);
+      for(Language language: langs){
+        languages.add(new DetectedLanguage(ISO639_MAP.get(language.getLang()), language.getConfidence()));
+      }
+    } else {
+      log.debug("No input text to detect language from, returning empty list");
+    }
+    return languages;
+  }
+
+  private static Map<String,String> make_ISO639_map() {
+    Map<String,String> map = new HashMap<>();
+    for (String lang : Locale.getISOLanguages()) {
+      Locale locale = new Locale(lang);
+      map.put(locale.getISO3Language(), locale.getLanguage());
+    }
+    return map;
+  }
+}
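
A minimal sketch of the OpenNLP calls the processor above makes, plus the
Locale trick behind make_ISO639_map(); the model file name is an illustrative
placeholder:

import java.io.FileInputStream;
import java.util.Locale;

import opennlp.tools.langdetect.Language;
import opennlp.tools.langdetect.LanguageDetectorME;
import opennlp.tools.langdetect.LanguageDetectorModel;

public class DetectExample {
  public static void main(String[] args) throws Exception {
    LanguageDetectorModel model;
    try (FileInputStream is = new FileInputStream("langdetect-183.bin")) {
      model = new LanguageDetectorModel(is);
    }
    // predictLanguages() returns every language the model knows,
    // ordered by confidence; element 0 is the best guess.
    Language[] langs = new LanguageDetectorME(model).predictLanguages(
        "Apache Lucene is a free and open-source search library.");
    System.out.println(langs[0].getLang() + " @ " + langs[0].getConfidence());
    // the same 3-letter -> 2-letter conversion the map is built from:
    System.out.println(new Locale("en").getISO3Language()); // prints "eng"
  }
}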

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/contrib/langid/src/java/org/apache/solr/update/processor/OpenNLPLangDetectUpdateProcessorFactory.java
----------------------------------------------------------------------
diff --git a/solr/contrib/langid/src/java/org/apache/solr/update/processor/OpenNLPLangDetectUpdateProcessorFactory.java b/solr/contrib/langid/src/java/org/apache/solr/update/processor/OpenNLPLangDetectUpdateProcessorFactory.java
new file mode 100644
index 0000000..dfbdcbd
--- /dev/null
+++ b/solr/contrib/langid/src/java/org/apache/solr/update/processor/OpenNLPLangDetectUpdateProcessorFactory.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.update.processor;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.SolrResourceLoader;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.util.SolrPluginUtils;
+import org.apache.solr.util.plugin.SolrCoreAware;
+
+import opennlp.tools.langdetect.LanguageDetectorModel;
+
+/**
+ * Identifies the language of a set of input fields using <a href="https://opennlp.apache.org/">Apache OpenNLP</a>.
+ * <p>
+ * The UpdateProcessorChain config entry can take a number of parameters
+ * which may also be passed as HTTP parameters on the update request
+ * and override the defaults. Here is the simplest processor config possible:
+ * 
+ * <pre class="prettyprint" >
+ * &lt;processor class=&quot;org.apache.solr.update.processor.OpenNLPLangDetectUpdateProcessorFactory&quot;&gt;
+ *   &lt;str name=&quot;langid.fl&quot;&gt;title,text&lt;/str&gt;
+ *   &lt;str name=&quot;langid.langField&quot;&gt;language_s&lt;/str&gt;
+ *   &lt;str name="langid.model"&gt;langdetect-183.bin&lt;/str&gt;
+ * &lt;/processor&gt;
+ * </pre>
+ * See <a href="http://wiki.apache.org/solr/LanguageDetection">http://wiki.apache.org/solr/LanguageDetection</a>
+ */
+public class OpenNLPLangDetectUpdateProcessorFactory extends UpdateRequestProcessorFactory
+  implements SolrCoreAware {
+
+  private static final String MODEL_PARAM = "langid.model";
+  private String modelFile;
+  private LanguageDetectorModel model;
+  protected SolrParams defaults;
+  protected SolrParams appends;
+  protected SolrParams invariants;
+  private SolrResourceLoader solrResourceLoader;
+
+  @Override
+  public void init( NamedList args )
+  {
+    if (args != null) {
+      Object o;
+      o = args.get("defaults");
+      if (o instanceof NamedList) {
+        defaults = SolrParams.toSolrParams((NamedList) o);
+      } else {
+        defaults = SolrParams.toSolrParams(args);
+      }
+      o = args.get("appends");
+      if (o instanceof NamedList) {
+        appends = SolrParams.toSolrParams((NamedList) o);
+      }
+      o = args.get("invariants");
+      if (o instanceof NamedList) {
+        invariants = SolrParams.toSolrParams((NamedList) o);
+      }
+
+      // Look for model filename in invariants, then in args, then defaults
+      if (invariants != null) {
+        modelFile = invariants.get(MODEL_PARAM);
+      }
+      if (modelFile == null) {
+        o = args.get(MODEL_PARAM);
+        if (o instanceof String) {
+          modelFile = (String)o;
+        } else {
+          modelFile = defaults.get(MODEL_PARAM);
+          if (modelFile == null) {
+            throw new RuntimeException("Missing required parameter '" + MODEL_PARAM + "': no language detection model to load");
+          }
+        }
+      }
+    }
+  }
+
+  @Override
+  public UpdateRequestProcessor getInstance(SolrQueryRequest req, SolrQueryResponse rsp, UpdateRequestProcessor next) {
+    // Process defaults, appends and invariants if we got a request
+    if (req != null) {
+      SolrPluginUtils.setDefaults(req, defaults, appends, invariants);
+    }
+    return new OpenNLPLangDetectUpdateProcessor(req, rsp, next, model);
+  }
+
+  private void loadModel() throws IOException {
+    InputStream is = null;
+    try{
+      if (modelFile != null) {
+        is = solrResourceLoader.openResource(modelFile);
+        model = new LanguageDetectorModel(is);
+      }
+    }
+    finally{
+      IOUtils.closeQuietly(is);
+    }
+  }
+
+  @Override
+  public void inform(SolrCore core){
+    solrResourceLoader = core.getResourceLoader();
+    try {
+      loadModel();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+}
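
A sketch of initializing the factory above directly from Java, mirroring the
invariants > top-level args > defaults lookup order for langid.model (the
model file name here is illustrative):

import org.apache.solr.common.util.NamedList;

public class FactoryInitSketch {
  public static void main(String[] args) {
    OpenNLPLangDetectUpdateProcessorFactory factory = new OpenNLPLangDetectUpdateProcessorFactory();
    NamedList<Object> initArgs = new NamedList<>();
    NamedList<Object> defaults = new NamedList<>();
    defaults.add("langid.fl", "title,text");
    defaults.add("langid.model", "opennlp-langdetect.bin"); // consulted last
    initArgs.add("defaults", defaults);
    factory.init(initArgs);   // resolves and remembers the model file name
    // factory.inform(core) would then load the model via the core's resource loader
  }
}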

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/contrib/langid/src/java/org/apache/solr/update/processor/TikaLanguageIdentifierUpdateProcessor.java
----------------------------------------------------------------------
diff --git a/solr/contrib/langid/src/java/org/apache/solr/update/processor/TikaLanguageIdentifierUpdateProcessor.java b/solr/contrib/langid/src/java/org/apache/solr/update/processor/TikaLanguageIdentifierUpdateProcessor.java
index df0e5f7..5c8146d 100644
--- a/solr/contrib/langid/src/java/org/apache/solr/update/processor/TikaLanguageIdentifierUpdateProcessor.java
+++ b/solr/contrib/langid/src/java/org/apache/solr/update/processor/TikaLanguageIdentifierUpdateProcessor.java
@@ -28,8 +28,6 @@ import org.apache.solr.common.SolrInputDocument;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.Collection;
-
 /**
  * Identifies the language of a set of input fields using Tika's
  * LanguageIdentifier.
@@ -67,67 +65,4 @@ public class TikaLanguageIdentifierUpdateProcessor extends LanguageIdentifierUpd
     }
     return languages;
   }
-
-
-  /**
-   * Concatenates content from multiple fields
-   */
-  protected String concatFields(SolrInputDocument doc) {
-    StringBuilder sb = new StringBuilder(getExpectedSize(doc, inputFields));
-    for (String fieldName : inputFields) {
-      log.debug("Appending field " + fieldName);
-      if (doc.containsKey(fieldName)) {
-        Collection<Object> fieldValues = doc.getFieldValues(fieldName);
-        if (fieldValues != null) {
-          for (Object content : fieldValues) {
-            if (content instanceof String) {
-              String stringContent = (String) content;
-              if (stringContent.length() > maxFieldValueChars) {
-                sb.append(stringContent.substring(0, maxFieldValueChars));
-              } else {
-                sb.append(stringContent);
-}
-              sb.append(" ");
-              if (sb.length() > maxTotalChars) {
-                sb.setLength(maxTotalChars);
-                break;
-              }
-            } else {
-              log.warn("Field " + fieldName + " not a String value, not including in detection");
-            }
-          }
-        }
-      }
-    }
-    return sb.toString();
-  }
-
-  /**
-   * Calculate expected string size.
-   *
-   * @param doc           solr input document
-   * @param fields        fields to select
-   * @return expected size of string value
-   */
-  private int getExpectedSize(SolrInputDocument doc, String[] fields) {
-    int docSize = 0;
-    for (String field : fields) {
-      if (doc.containsKey(field)) {
-        Collection<Object> contents = doc.getFieldValues(field);
-        if (contents != null) {
-          for (Object content : contents) {
-            if (content instanceof String) {
-              docSize += Math.min(((String) content).length(), maxFieldValueChars);
-            }
-          }
-
-          if (docSize > maxTotalChars) {
-            docSize = maxTotalChars;
-            break;
-          }
-        }
-      }
-    }
-    return docSize;
-  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/opennlp-langdetect.eng-swe-spa-rus-deu.bin
----------------------------------------------------------------------
diff --git a/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/opennlp-langdetect.eng-swe-spa-rus-deu.bin b/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/opennlp-langdetect.eng-swe-spa-rus-deu.bin
new file mode 100644
index 0000000..ad584e6
Binary files /dev/null and b/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/opennlp-langdetect.eng-swe-spa-rus-deu.bin differ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/solrconfig-languageidentifier.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/solrconfig-languageidentifier.xml b/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/solrconfig-languageidentifier.xml
index 9ae54ad..01dbee9 100644
--- a/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/solrconfig-languageidentifier.xml
+++ b/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/solrconfig-languageidentifier.xml
@@ -57,11 +57,11 @@
 
   <requestHandler name="/update" class="solr.UpdateRequestHandler" >
     <lst name="defaults">
-      <str name="update.chain">lang_id</str>
+      <str name="update.chain">lang_id_tika</str>
     </lst>
   </requestHandler>  
 
-  <updateRequestProcessorChain name="lang_id">
+  <updateRequestProcessorChain name="lang_id_tika">
     <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
       <!-- Can take defaults, invariants and appends just like req handlers-->
       <lst name="defaults">
@@ -78,7 +78,7 @@
     <processor class="solr.RunUpdateProcessorFactory" />
   </updateRequestProcessorChain>
   
-    <updateRequestProcessorChain name="lang_id_alt">
+  <updateRequestProcessorChain name="lang_id_lang_detect">
     <processor class="org.apache.solr.update.processor.LangDetectLanguageIdentifierUpdateProcessorFactory">
       <!-- Can take defaults, invariants and appends just like req handlers-->
       <lst name="defaults">
@@ -94,5 +94,22 @@
     </processor>
     <processor class="solr.RunUpdateProcessorFactory" />
   </updateRequestProcessorChain>
-  
+
+  <updateRequestProcessorChain name="lang_id_opennlp">
+    <processor class="org.apache.solr.update.processor.OpenNLPLangDetectUpdateProcessorFactory">
+      <!-- Can take defaults, invariants and appends just like req handlers-->
+      <lst name="defaults">
+        <bool name="langid">true</bool>
+        <str name="langid.fl">name,subject</str>
+        <bool name="langid.map">true</bool>
+        <str name="langid.langField">language_s</str>
+        <str name="langid.langsField">language_sm</str>
+        <str name="langid.map.lcmap">th:thai</str>
+        <float name="threshold">0.3</float>
+        <str name="langid.model">opennlp-langdetect.eng-swe-spa-rus-deu.bin</str>
+      </lst>
+    </processor>
+    <processor class="solr.RunUpdateProcessorFactory" />
+  </updateRequestProcessorChain>
+
 </config>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/contrib/langid/src/test-files/opennlp.langdetect.trainer.params.txt
----------------------------------------------------------------------
diff --git a/solr/contrib/langid/src/test-files/opennlp.langdetect.trainer.params.txt b/solr/contrib/langid/src/test-files/opennlp.langdetect.trainer.params.txt
new file mode 100644
index 0000000..1ecec82
--- /dev/null
+++ b/solr/contrib/langid/src/test-files/opennlp.langdetect.trainer.params.txt
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+Algorithm=PERCEPTRON
+Cutoff=0

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java
----------------------------------------------------------------------
diff --git a/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java b/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java
index b90f54a..21ecd7d 100644
--- a/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java
+++ b/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java
@@ -38,7 +38,11 @@ public abstract class LanguageIdentifierUpdateProcessorFactoryTestCase extends S
   public static void beforeClass() throws Exception {
     initCore("solrconfig-languageidentifier.xml", "schema.xml", getFile("langid/solr").getAbsolutePath());
     SolrCore core = h.getCore();
-    UpdateRequestProcessorChain chained = core.getUpdateProcessingChain("lang_id");
+    UpdateRequestProcessorChain chained = core.getUpdateProcessingChain("lang_id_tika");
+    assertNotNull(chained);
+    chained = core.getUpdateProcessingChain("lang_id_lang_detect");
+    assertNotNull(chained);
+    chained = core.getUpdateProcessingChain("lang_id_opennlp");
     assertNotNull(chained);
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/contrib/langid/src/test/org/apache/solr/update/processor/OpenNLPLangDetectUpdateProcessorFactoryTest.java
----------------------------------------------------------------------
diff --git a/solr/contrib/langid/src/test/org/apache/solr/update/processor/OpenNLPLangDetectUpdateProcessorFactoryTest.java b/solr/contrib/langid/src/test/org/apache/solr/update/processor/OpenNLPLangDetectUpdateProcessorFactoryTest.java
new file mode 100644
index 0000000..7b95e6f
--- /dev/null
+++ b/solr/contrib/langid/src/test/org/apache/solr/update/processor/OpenNLPLangDetectUpdateProcessorFactoryTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.request.SolrQueryRequest;
+import org.junit.Test;
+
+public class OpenNLPLangDetectUpdateProcessorFactoryTest extends LanguageIdentifierUpdateProcessorFactoryTestCase {
+  private static final String TEST_MODEL = "opennlp-langdetect.eng-swe-spa-rus-deu.bin";
+  
+  @Override
+  protected OpenNLPLangDetectUpdateProcessor createLangIdProcessor(ModifiableSolrParams parameters) throws Exception {
+    if (parameters.get("langid.model") == null) { // handle superclass tests that don't provide the model filename
+      parameters.set("langid.model", TEST_MODEL);
+    }
+    if (parameters.get("langid.threshold") == null) { // handle superclass tests that don't provide confidence threshold
+      parameters.set("langid.threshold", "0.3");
+    }
+    SolrQueryRequest req = _parser.buildRequestFrom(h.getCore(), new ModifiableSolrParams(), null);
+    OpenNLPLangDetectUpdateProcessorFactory factory = new OpenNLPLangDetectUpdateProcessorFactory();
+    factory.init(parameters.toNamedList());
+    factory.inform(h.getCore());
+    return (OpenNLPLangDetectUpdateProcessor)factory.getInstance(req, resp, null);
+  }
+
+  // the OpenNLP detector copes relatively well with short docs, so use an empty doc as the "too short" case
+  @Override
+  protected SolrInputDocument tooShortDoc() {
+    SolrInputDocument doc = new SolrInputDocument();
+    doc.addField("text", "");
+    return doc;
+  }
+
+  @Test @Override
+  public void testLangIdGlobal() throws Exception {
+    ModifiableSolrParams parameters = new ModifiableSolrParams();
+    parameters.add("langid.fl", "name,subject");
+    parameters.add("langid.langField", "language_s");
+    parameters.add("langid.model", TEST_MODEL);
+    parameters.add("langid.threshold", "0.3");
+    liProcessor = createLangIdProcessor(parameters);
+
+    assertLang("en", "id", "1en", "name", "Lucene", "subject", "Apache Lucene is a free/open source information retrieval software library, originally created in Java by Doug Cutting. It is supported by the Apache Software Foundation and is released under the Apache Software License.");
+    assertLang("sv", "id", "2sv", "name", "Maven", "subject", "Apache Maven är ett verktyg utvecklat av Apache Software Foundation och används inom systemutveckling av datorprogram i programspråket Java. Maven används för att automatiskt paketera (bygga) programfilerna till en distribuerbar enhet. Maven används inom samma område som Apache Ant men dess byggfiler är deklarativa till skillnad ifrån Ants skriptbaserade.");
+    assertLang("es", "id", "3es", "name", "Lucene", "subject", "Lucene es un API de código abierto para recuperación de información, originalmente implementada en Java por Doug Cutting. Está apoyado por el Apache Software Foundation y se distribuye bajo la Apache Software License. Lucene tiene versiones para otros lenguajes incluyendo Delphi, Perl, C#, C++, Python, Ruby y PHP.");
+    assertLang("ru", "id", "4ru", "name", "Lucene", "subject", "The Apache Lucene — это свободная библиотека для высокоскоростного полнотекстового поиска, написанная на Java. Может быть использована для поиска в интернете и других областях компьютерной лингвистики (аналитическая философия).");
+    assertLang("de", "id", "5de", "name", "Lucene", "subject", "Lucene ist ein Freie-Software-Projekt der Apache Software Foundation, das eine Suchsoftware erstellt. Durch die hohe Leistungsfähigkeit und Skalierbarkeit können die Lucene-Werkzeuge für beliebige Projektgrößen und Anforderungen eingesetzt werden. So setzt beispielsweise Wikipedia Lucene für die Volltextsuche ein. Zudem verwenden die beiden Desktop-Suchprogramme Beagle und Strigi eine C#- bzw. C++- Portierung von Lucene als Indexer.");
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/03095ce4/solr/solr-ref-guide/src/detecting-languages-during-indexing.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/detecting-languages-during-indexing.adoc b/solr/solr-ref-guide/src/detecting-languages-during-indexing.adoc
index 12e8804..7caccb7 100644
--- a/solr/solr-ref-guide/src/detecting-languages-during-indexing.adoc
+++ b/solr/solr-ref-guide/src/detecting-languages-during-indexing.adoc
@@ -18,12 +18,13 @@
 
 Solr can identify languages and map text to language-specific fields during indexing using the `langid` UpdateRequestProcessor.
 
-Solr supports two implementations of this feature:
+Solr supports three implementations of this feature:
 
 * Tika's language detection feature: http://tika.apache.org/0.10/detection.html
 * LangDetect language detection: https://github.com/shuyo/language-detection
+* OpenNLP language detection: http://opennlp.apache.org/docs/1.8.4/manual/opennlp.html#tools.langdetect 
 
-You can see a comparison between the two implementations here: http://blog.mikemccandless.com/2011/10/accuracy-and-performance-of-googles.html. In general, the LangDetect implementation supports more languages with higher performance.
+You can see a comparison between the Tika and LangDetect implementations here: http://blog.mikemccandless.com/2011/10/accuracy-and-performance-of-googles.html. In general, the LangDetect implementation supports more languages with higher performance.
 
 For specific information on each of these language identification implementations, including a list of supported languages for each, see the relevant project websites.
 
@@ -61,6 +62,30 @@ Here is an example of a minimal LangDetect `langid` configuration in `solrconfig
 </processor>
 ----
 
+=== Configuring OpenNLP Language Detection
+
+Here is an example of a minimal OpenNLP `langid` configuration in `solrconfig.xml`:
+
+[source,xml]
+----
+<processor class="org.apache.solr.update.processor.OpenNLPLangDetectUpdateProcessorFactory">
+  <lst name="defaults">
+    <str name="langid.fl">title,subject,text,keywords</str>
+    <str name="langid.langField">language_s</str>
+    <str name="langid.model">langdetect-183.bin</str>
+  </lst>
+</processor>
+----
+
+==== OpenNLP-specific Parameters
+
+`langid.model`::
+An OpenNLP language detection model. The OpenNLP project provides a pre-trained model covering 103 languages on the http://opennlp.apache.org/models.html[OpenNLP site's model download page]. Model training instructions are provided on the http://opennlp.apache.org/docs/1.8.4/manual/opennlp.html#tools.langdetect[OpenNLP website]. This parameter is required.
+
+==== OpenNLP Language Codes
+
+`OpenNLPLangDetectUpdateProcessor` automatically converts the 3-letter ISO 639-3 codes detected by the OpenNLP model into 2-letter ISO 639-1 codes.
+
 == langid Parameters
 
 As previously mentioned, both implementations of the `langid` UpdateRequestProcessor take the same parameters.


[21/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to it's own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java
deleted file mode 100644
index 426c879..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java
+++ /dev/null
@@ -1,1003 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
-import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
-import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.UpdateResponse;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrCloseable;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.SuppressForbidden;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardHandlerFactory;
-import org.apache.solr.handler.component.ShardRequest;
-import org.apache.solr.handler.component.ShardResponse;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.RTimer;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
-import static org.apache.solr.common.cloud.DocCollection.SNITCH;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NODE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.util.Utils.makeMap;
-
-/**
- * A {@link OverseerMessageHandler} that handles Collections API related
- * overseer messages.
- */
-public class OverseerCollectionMessageHandler implements OverseerMessageHandler, SolrCloseable {
-
-  public static final String NUM_SLICES = "numShards";
-
-  static final boolean CREATE_NODE_SET_SHUFFLE_DEFAULT = true;
-  public static final String CREATE_NODE_SET_SHUFFLE = CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM;
-  public static final String CREATE_NODE_SET_EMPTY = "EMPTY";
-  public static final String CREATE_NODE_SET = CollectionAdminParams.CREATE_NODE_SET_PARAM;
-
-  public static final String ROUTER = "router";
-
-  public static final String SHARDS_PROP = "shards";
-
-  public static final String REQUESTID = "requestid";
-
-  public static final String COLL_CONF = "collection.configName";
-
-  public static final String COLL_PROP_PREFIX = "property.";
-
-  public static final String ONLY_IF_DOWN = "onlyIfDown";
-
-  public static final String SHARD_UNIQUE = "shardUnique";
-
-  public static final String ONLY_ACTIVE_NODES = "onlyactivenodes";
-
-  static final String SKIP_CREATE_REPLICA_IN_CLUSTER_STATE = "skipCreateReplicaInClusterState";
-
-  public static final Map<String, Object> COLL_PROPS = Collections.unmodifiableMap(makeMap(
-      ROUTER, DocRouter.DEFAULT_NAME,
-      ZkStateReader.REPLICATION_FACTOR, "1",
-      ZkStateReader.NRT_REPLICAS, "1",
-      ZkStateReader.TLOG_REPLICAS, "0",
-      ZkStateReader.PULL_REPLICAS, "0",
-      ZkStateReader.MAX_SHARDS_PER_NODE, "1",
-      ZkStateReader.AUTO_ADD_REPLICAS, "false",
-      DocCollection.RULE, null,
-      POLICY, null,
-      SNITCH, null));
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  Overseer overseer;
-  ShardHandlerFactory shardHandlerFactory;
-  String adminPath;
-  ZkStateReader zkStateReader;
-  SolrCloudManager cloudManager;
-  String myId;
-  Stats stats;
-  TimeSource timeSource;
-
-  // Set that tracks collections that are currently being processed by a running task.
-  // This is used for handling mutual exclusion of the tasks.
-
-  final private LockTree lockTree = new LockTree();
-  ExecutorService tpe = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 10, 0L, TimeUnit.MILLISECONDS,
-      new SynchronousQueue<>(),
-      new DefaultSolrThreadFactory("OverseerCollectionMessageHandlerThreadFactory"));
-
-  static final Random RANDOM;
-  static {
-    // We try to make things reproducible in the context of our tests by initializing the random instance
-    // based on the current seed
-    String seed = System.getProperty("tests.seed");
-    if (seed == null) {
-      RANDOM = new Random();
-    } else {
-      RANDOM = new Random(seed.hashCode());
-    }
-  }
-
-  final Map<CollectionAction, Cmd> commandMap;
-
-  private volatile boolean isClosed;
-
-  public OverseerCollectionMessageHandler(ZkStateReader zkStateReader, String myId,
-                                        final ShardHandlerFactory shardHandlerFactory,
-                                        String adminPath,
-                                        Stats stats,
-                                        Overseer overseer,
-                                        OverseerNodePrioritizer overseerPrioritizer) {
-    this.zkStateReader = zkStateReader;
-    this.shardHandlerFactory = shardHandlerFactory;
-    this.adminPath = adminPath;
-    this.myId = myId;
-    this.stats = stats;
-    this.overseer = overseer;
-    this.cloudManager = overseer.getSolrCloudManager();
-    this.timeSource = cloudManager.getTimeSource();
-    this.isClosed = false;
-    commandMap = new ImmutableMap.Builder<CollectionAction, Cmd>()
-        .put(REPLACENODE, new ReplaceNodeCmd(this))
-        .put(DELETENODE, new DeleteNodeCmd(this))
-        .put(BACKUP, new BackupCmd(this))
-        .put(RESTORE, new RestoreCmd(this))
-        .put(CREATESNAPSHOT, new CreateSnapshotCmd(this))
-        .put(DELETESNAPSHOT, new DeleteSnapshotCmd(this))
-        .put(SPLITSHARD, new SplitShardCmd(this))
-        .put(ADDROLE, new OverseerRoleCmd(this, ADDROLE, overseerPrioritizer))
-        .put(REMOVEROLE, new OverseerRoleCmd(this, REMOVEROLE, overseerPrioritizer))
-        .put(MOCK_COLL_TASK, this::mockOperation)
-        .put(MOCK_SHARD_TASK, this::mockOperation)
-        .put(MOCK_REPLICA_TASK, this::mockOperation)
-        .put(MIGRATESTATEFORMAT, this::migrateStateFormat)
-        .put(CREATESHARD, new CreateShardCmd(this))
-        .put(MIGRATE, new MigrateCmd(this))
-        .put(CREATE, new CreateCollectionCmd(this))
-        .put(MODIFYCOLLECTION, this::modifyCollection)
-        .put(ADDREPLICAPROP, this::processReplicaAddPropertyCommand)
-        .put(DELETEREPLICAPROP, this::processReplicaDeletePropertyCommand)
-        .put(BALANCESHARDUNIQUE, this::balanceProperty)
-        .put(REBALANCELEADERS, this::processRebalanceLeaders)
-        .put(RELOAD, this::reloadCollection)
-        .put(DELETE, new DeleteCollectionCmd(this))
-        .put(CREATEALIAS, new CreateAliasCmd(this))
-        .put(DELETEALIAS, new DeleteAliasCmd(this))
-        .put(ROUTEDALIAS_CREATECOLL, new RoutedAliasCreateCollectionCmd(this))
-        .put(OVERSEERSTATUS, new OverseerStatusCmd(this))
-        .put(DELETESHARD, new DeleteShardCmd(this))
-        .put(DELETEREPLICA, new DeleteReplicaCmd(this))
-        .put(ADDREPLICA, new AddReplicaCmd(this))
-        .put(MOVEREPLICA, new MoveReplicaCmd(this))
-        .put(UTILIZENODE, new UtilizeNodeCmd(this))
-        .build()
-    ;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public SolrResponse processMessage(ZkNodeProps message, String operation) {
-    log.debug("OverseerCollectionMessageHandler.processMessage : {} , {}", operation, message);
-
-    NamedList results = new NamedList();
-    try {
-      CollectionAction action = getCollectionAction(operation);
-      Cmd command = commandMap.get(action);
-      if (command != null) {
-        command.call(cloudManager.getClusterStateProvider().getClusterState(), message, results);
-      } else {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation: "
-            + operation);
-      }
-    } catch (Exception e) {
-      String collName = message.getStr("collection");
-      if (collName == null) collName = message.getStr(NAME);
-
-      if (collName == null) {
-        SolrException.log(log, "Operation " + operation + " failed", e);
-      } else  {
-        SolrException.log(log, "Collection: " + collName + " operation: " + operation
-            + " failed", e);
-      }
-
-      results.add("Operation " + operation + " caused exception:", e);
-      SimpleOrderedMap nl = new SimpleOrderedMap();
-      nl.add("msg", e.getMessage());
-      nl.add("rspCode", e instanceof SolrException ? ((SolrException)e).code() : -1);
-      results.add("exception", nl);
-    }
-    return new OverseerSolrResponse(results);
-  }
-
-  @SuppressForbidden(reason = "Needs currentTimeMillis for mock requests")
-  private void mockOperation(ClusterState state, ZkNodeProps message, NamedList results) throws InterruptedException {
-    //only for test purposes
-    Thread.sleep(message.getInt("sleep", 1));
-    log.info("MOCK_TASK_EXECUTED time {} data {}", System.currentTimeMillis(), Utils.toJSONString(message));
-    results.add("MOCK_FINISHED", System.currentTimeMillis());
-  }
-
-  private CollectionAction getCollectionAction(String operation) {
-    CollectionAction action = CollectionAction.get(operation);
-    if (action == null) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation: " + operation);
-    }
-    return action;
-  }
-
-  private void reloadCollection(ClusterState clusterState, ZkNodeProps message, NamedList results) {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminAction.RELOAD.toString());
-
-    String asyncId = message.getStr(ASYNC);
-    Map<String, String> requestMap = null;
-    if (asyncId != null) {
-      requestMap = new HashMap<>();
-    }
-    collectionCmd(message, params, results, Replica.State.ACTIVE, asyncId, requestMap);
-  }
-
-  @SuppressWarnings("unchecked")
-  private void processRebalanceLeaders(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws Exception {
-    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, CORE_NAME_PROP, ELECTION_NODE_PROP,
-        CORE_NODE_NAME_PROP, BASE_URL_PROP, REJOIN_AT_HEAD_PROP);
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(COLLECTION_PROP, message.getStr(COLLECTION_PROP));
-    params.set(SHARD_ID_PROP, message.getStr(SHARD_ID_PROP));
-    params.set(REJOIN_AT_HEAD_PROP, message.getStr(REJOIN_AT_HEAD_PROP));
-    params.set(CoreAdminParams.ACTION, CoreAdminAction.REJOINLEADERELECTION.toString());
-    params.set(CORE_NAME_PROP, message.getStr(CORE_NAME_PROP));
-    params.set(CORE_NODE_NAME_PROP, message.getStr(CORE_NODE_NAME_PROP));
-    params.set(ELECTION_NODE_PROP, message.getStr(ELECTION_NODE_PROP));
-    params.set(BASE_URL_PROP, message.getStr(BASE_URL_PROP));
-
-    String baseUrl = message.getStr(BASE_URL_PROP);
-    ShardRequest sreq = new ShardRequest();
-    sreq.nodeName = message.getStr(ZkStateReader.CORE_NAME_PROP);
-    // yes, they must use the same admin handler path everywhere...
-    params.set("qt", adminPath);
-    sreq.purpose = ShardRequest.PURPOSE_PRIVATE;
-    sreq.shards = new String[] {baseUrl};
-    sreq.actualShards = sreq.shards;
-    sreq.params = params;
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-    shardHandler.submit(sreq, baseUrl, sreq.params);
-  }
-
-  @SuppressWarnings("unchecked")
-  private void processReplicaAddPropertyCommand(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws Exception {
-    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP, PROPERTY_VALUE_PROP);
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
-    Map<String, Object> propMap = new HashMap<>();
-    propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICAPROP.toLower());
-    propMap.putAll(message.getProperties());
-    ZkNodeProps m = new ZkNodeProps(propMap);
-    inQueue.offer(Utils.toJSON(m));
-  }
-
-  private void processReplicaDeletePropertyCommand(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws Exception {
-    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP);
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
-    Map<String, Object> propMap = new HashMap<>();
-    propMap.put(Overseer.QUEUE_OPERATION, DELETEREPLICAPROP.toLower());
-    propMap.putAll(message.getProperties());
-    ZkNodeProps m = new ZkNodeProps(propMap);
-    inQueue.offer(Utils.toJSON(m));
-  }
-
-  private void balanceProperty(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    if (StringUtils.isBlank(message.getStr(COLLECTION_PROP)) || StringUtils.isBlank(message.getStr(PROPERTY_PROP))) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          "The '" + COLLECTION_PROP + "' and '" + PROPERTY_PROP +
-              "' parameters are required for the BALANCESHARDUNIQUE operation, no action taken");
-    }
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
-    Map<String, Object> propMap = new HashMap<>();
-    propMap.put(Overseer.QUEUE_OPERATION, BALANCESHARDUNIQUE.toLower());
-    propMap.putAll(message.getProperties());
-    inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
-  }
-
-  /**
-   * Walks the tree of collection status to verify that any replica not reporting a "down" status
-   * is on a live node; any replica reporting its status as "active" whose node is not live is
-   * marked as "down". Used by CLUSTERSTATUS.
-   * @param liveNodes List of currently live node names.
-   * @param collectionProps Map of collection status information pulled directly from ZooKeeper.
-   */
-
-  @SuppressWarnings("unchecked")
-  protected void crossCheckReplicaStateWithLiveNodes(List<String> liveNodes, NamedList<Object> collectionProps) {
-    Iterator<Map.Entry<String,Object>> colls = collectionProps.iterator();
-    while (colls.hasNext()) {
-      Map.Entry<String,Object> next = colls.next();
-      Map<String,Object> collMap = (Map<String,Object>)next.getValue();
-      Map<String,Object> shards = (Map<String,Object>)collMap.get("shards");
-      for (Object nextShard : shards.values()) {
-        Map<String,Object> shardMap = (Map<String,Object>)nextShard;
-        Map<String,Object> replicas = (Map<String,Object>)shardMap.get("replicas");
-        for (Object nextReplica : replicas.values()) {
-          Map<String,Object> replicaMap = (Map<String,Object>)nextReplica;
-          if (Replica.State.getState((String) replicaMap.get(ZkStateReader.STATE_PROP)) != Replica.State.DOWN) {
-            // not down, so verify the node is live
-            String node_name = (String)replicaMap.get(ZkStateReader.NODE_NAME_PROP);
-            if (!liveNodes.contains(node_name)) {
-              // node is not live, so this replica is actually down
-              replicaMap.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
-            }
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Get collection status from cluster state.
-   * Can optionally restrict the status to a given set of shard names.
-   *
-   * @param collection collection map parsed from JSON-serialized {@link ClusterState}
-   * @param name collection name
-   * @param requestedShards a set of shards to be returned in the status.
-   *                        An empty or null value indicates <b>all</b> shards.
-   * @return map of collection properties
-   */
-  @SuppressWarnings("unchecked")
-  private Map<String, Object> getCollectionStatus(Map<String, Object> collection, String name, Set<String> requestedShards) {
-    if (collection == null)  {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + name + " not found");
-    }
-    if (requestedShards == null || requestedShards.isEmpty()) {
-      return collection;
-    } else {
-      Map<String, Object> shards = (Map<String, Object>) collection.get("shards");
-      Map<String, Object>  selected = new HashMap<>();
-      for (String selectedShard : requestedShards) {
-        if (!shards.containsKey(selectedShard)) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + name + " shard: " + selectedShard + " not found");
-        }
-        selected.put(selectedShard, shards.get(selectedShard));
-        collection.put("shards", selected);
-      }
-      return collection;
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-      throws Exception {
-    ((DeleteReplicaCmd) commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, message, results, onComplete);
-
-  }
-
-  boolean waitForCoreNodeGone(String collectionName, String shard, String replicaName, int timeoutms) throws InterruptedException {
-    TimeOut timeout = new TimeOut(timeoutms, TimeUnit.MILLISECONDS, timeSource);
-    while (! timeout.hasTimedOut()) {
-      timeout.sleep(100);
-      DocCollection docCollection = zkStateReader.getClusterState().getCollection(collectionName);
-      if (docCollection == null) { // someone already deleted the collection
-        return true;
-      }
-      Slice slice = docCollection.getSlice(shard);
-      if(slice == null || slice.getReplica(replicaName) == null) {
-        return true;
-      }
-    }
-    // replica still exists after the timeout
-    return false;
-  }
-
-  void deleteCoreNode(String collectionName, String replicaName, Replica replica, String core) throws Exception {
-    ZkNodeProps m = new ZkNodeProps(
-        Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),
-        ZkStateReader.CORE_NAME_PROP, core,
-        ZkStateReader.NODE_NAME_PROP, replica.getStr(ZkStateReader.NODE_NAME_PROP),
-        ZkStateReader.COLLECTION_PROP, collectionName,
-        ZkStateReader.CORE_NODE_NAME_PROP, replicaName,
-        ZkStateReader.BASE_URL_PROP, replica.getStr(ZkStateReader.BASE_URL_PROP));
-    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-  }
-
-  void checkRequired(ZkNodeProps message, String... props) {
-    for (String prop : props) {
-      if(message.get(prop) == null){
-        throw new SolrException(ErrorCode.BAD_REQUEST, StrUtils.join(Arrays.asList(props),',') +" are required params" );
-      }
-    }
-
-  }
-
-  // TODO: shouldn't this be removed in the next release?
-  private void migrateStateFormat(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    final String collectionName = message.getStr(COLLECTION_PROP);
-
-    boolean firstLoop = true;
-    // wait for a while until the state format changes
-    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-    while (! timeout.hasTimedOut()) {
-      DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
-      if (collection == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + collectionName + " not found");
-      }
-      if (collection.getStateFormat() == 2) {
-        // Done.
-        results.add("success", new SimpleOrderedMap<>());
-        return;
-      }
-
-      if (firstLoop) {
-        // Actually queue the migration command.
-        firstLoop = false;
-        ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, MIGRATESTATEFORMAT.toLower(), COLLECTION_PROP, collectionName);
-        Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-      }
-      timeout.sleep(100);
-    }
-    throw new SolrException(ErrorCode.SERVER_ERROR, "Could not migrate state format for collection: " + collectionName);
-  }
-
-  void commit(NamedList results, String slice, Replica parentShardLeader) {
-    log.debug("Calling soft commit to make sub shard updates visible");
-    String coreUrl = new ZkCoreNodeProps(parentShardLeader).getCoreUrl();
-    // HttpShardHandler is hard-coded to send a QueryRequest, hence we go direct,
-    // and we force open a searcher so that we have documents to show upon switching states
-    UpdateResponse updateResponse = null;
-    try {
-      updateResponse = softCommit(coreUrl);
-      processResponse(results, null, coreUrl, updateResponse, slice, Collections.emptySet());
-    } catch (Exception e) {
-      processResponse(results, e, coreUrl, updateResponse, slice, Collections.emptySet());
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to call distrib softCommit on: " + coreUrl, e);
-    }
-  }
-
-
-  static UpdateResponse softCommit(String url) throws SolrServerException, IOException {
-
-    try (HttpSolrClient client = new HttpSolrClient.Builder(url)
-        .withConnectionTimeout(30000)
-        .withSocketTimeout(120000)
-        .build()) {
-      UpdateRequest ureq = new UpdateRequest();
-      ureq.setParams(new ModifiableSolrParams());
-      ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true, true);
-      return ureq.process(client);
-    }
-  }
-
-  String waitForCoreNodeName(String collectionName, String msgNodeName, String msgCore) {
-    int retryCount = 320;
-    while (retryCount-- > 0) {
-      final DocCollection docCollection = zkStateReader.getClusterState().getCollectionOrNull(collectionName);
-      if (docCollection != null && docCollection.getSlicesMap() != null) {
-        Map<String,Slice> slicesMap = docCollection.getSlicesMap();
-        for (Slice slice : slicesMap.values()) {
-          for (Replica replica : slice.getReplicas()) {
-            // TODO: for really large clusters, we could 'index' on this
-
-            String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
-            String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-
-            if (nodeName.equals(msgNodeName) && core.equals(msgCore)) {
-              return replica.getName();
-            }
-          }
-        }
-      }
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-      }
-    }
-    throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find coreNodeName");
-  }
-
-  void waitForNewShard(String collectionName, String sliceName) throws KeeperException, InterruptedException {
-    log.debug("Waiting for slice {} of collection {} to be available", sliceName, collectionName);
-    RTimer timer = new RTimer();
-    int retryCount = 320;
-    while (retryCount-- > 0) {
-      DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
-      if (collection == null) {
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-            "Unable to find collection: " + collectionName + " in clusterstate");
-      }
-      Slice slice = collection.getSlice(sliceName);
-      if (slice != null) {
-        log.debug("Waited for {}ms for slice {} of collection {} to be available",
-            timer.getTime(), sliceName, collectionName);
-        return;
-      }
-      Thread.sleep(1000);
-    }
-    throw new SolrException(ErrorCode.SERVER_ERROR,
-        "Could not find new slice " + sliceName + " in collection " + collectionName
-            + " even after waiting for " + timer.getTime() + "ms"
-    );
-  }
-
-  DocRouter.Range intersect(DocRouter.Range a, DocRouter.Range b) {
-    if (a == null || b == null || !a.overlaps(b)) {
-      return null;
-    } else if (a.isSubsetOf(b))
-      return a;
-    else if (b.isSubsetOf(a))
-      return b;
-    else if (b.includes(a.max)) {
-      return new DocRouter.Range(b.min, a.max);
-    } else  {
-      return new DocRouter.Range(a.min, b.max);
-    }
-  }
-
-  void sendShardRequest(String nodeName, ModifiableSolrParams params,
-                        ShardHandler shardHandler, String asyncId,
-                        Map<String, String> requestMap) {
-    sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap, adminPath, zkStateReader);
-
-  }
-
-  public static void sendShardRequest(String nodeName, ModifiableSolrParams params, ShardHandler shardHandler,
-                                      String asyncId, Map<String, String> requestMap, String adminPath,
-                                      ZkStateReader zkStateReader) {
-    if (asyncId != null) {
-      String coreAdminAsyncId = asyncId + Math.abs(System.nanoTime());
-      params.set(ASYNC, coreAdminAsyncId);
-      requestMap.put(nodeName, coreAdminAsyncId);
-    }
-
-    ShardRequest sreq = new ShardRequest();
-    params.set("qt", adminPath);
-    sreq.purpose = 1;
-    String replica = zkStateReader.getBaseUrlForNodeName(nodeName);
-    sreq.shards = new String[]{replica};
-    sreq.actualShards = sreq.shards;
-    sreq.nodeName = nodeName;
-    sreq.params = params;
-
-    shardHandler.submit(sreq, replica, sreq.params);
-  }
-
-  void addPropertyParams(ZkNodeProps message, ModifiableSolrParams params) {
-    // Now add the property.key=value pairs
-    for (String key : message.keySet()) {
-      if (key.startsWith(COLL_PROP_PREFIX)) {
-        params.set(key, message.getStr(key));
-      }
-    }
-  }
-
-  void addPropertyParams(ZkNodeProps message, Map<String, Object> map) {
-    // Now add the property.key=value pairs
-    for (String key : message.keySet()) {
-      if (key.startsWith(COLL_PROP_PREFIX)) {
-        map.put(key, message.getStr(key));
-      }
-    }
-  }
-
-
-  private void modifyCollection(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws Exception {
-    
-    final String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    //the rest of the processing is based on writing cluster state properties
-    //remove the property here to avoid any errors down the pipeline due to this property appearing
-    String configName = (String) message.getProperties().remove(COLL_CONF);
-    
-    if(configName != null) {
-      validateConfigOrThrowSolrException(configName);
-      
-      boolean isLegacyCloud =  Overseer.isLegacy(zkStateReader);
-      createConfNode(cloudManager.getDistribStateManager(), configName, collectionName, isLegacyCloud);
-      reloadCollection(null, new ZkNodeProps(NAME, collectionName), results);
-    }
-    
-    overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
-
-    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-    boolean areChangesVisible = true;
-    while (!timeout.hasTimedOut()) {
-      DocCollection collection = cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName);
-      areChangesVisible = true;
-      for (Map.Entry<String,Object> updateEntry : message.getProperties().entrySet()) {
-        String updateKey = updateEntry.getKey();
-        if (!updateKey.equals(ZkStateReader.COLLECTION_PROP)
-            && !updateKey.equals(Overseer.QUEUE_OPERATION)
-            && !collection.get(updateKey).equals(updateEntry.getValue())){
-          areChangesVisible = false;
-          break;
-        }
-      }
-      if (areChangesVisible) break;
-      timeout.sleep(100);
-    }
-
-    if (!areChangesVisible)
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not modify collection " + message);
-  }
-
-  void cleanupCollection(String collectionName, NamedList results) throws Exception {
-    log.error("Cleaning up collection [" + collectionName + "]." );
-    Map<String, Object> props = makeMap(
-        Overseer.QUEUE_OPERATION, DELETE.toLower(),
-        NAME, collectionName);
-    commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
-  }
-
-  Map<String, Replica> waitToSeeReplicasInState(String collectionName, Collection<String> coreNames) throws InterruptedException {
-    Map<String, Replica> result = new HashMap<>();
-    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-    while (true) {
-      DocCollection coll = zkStateReader.getClusterState().getCollection(collectionName);
-      for (String coreName : coreNames) {
-        if (result.containsKey(coreName)) continue;
-        for (Slice slice : coll.getSlices()) {
-          for (Replica replica : slice.getReplicas()) {
-            if (coreName.equals(replica.getStr(ZkStateReader.CORE_NAME_PROP))) {
-              result.put(coreName, replica);
-              break;
-            }
-          }
-        }
-      }
-      
-      if (result.size() == coreNames.size()) {
-        return result;
-      } else {
-        log.debug("Expecting {} cores but found {}", coreNames, result);
-      }
-      if (timeout.hasTimedOut()) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Timed out waiting to see all replicas: " + coreNames + " in cluster state. Last state: " + coll);
-      }
-      
-      Thread.sleep(100);
-    }
-  }
-
-  ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-      throws Exception {
-
-    return ((AddReplicaCmd) commandMap.get(ADDREPLICA)).addReplica(clusterState, message, results, onComplete);
-  }
-
-  void processResponses(NamedList results, ShardHandler shardHandler, boolean abortOnError, String msgOnError,
-                        String asyncId, Map<String, String> requestMap) {
-    processResponses(results, shardHandler, abortOnError, msgOnError, asyncId, requestMap, Collections.emptySet());
-  }
-
-  void processResponses(NamedList results, ShardHandler shardHandler, boolean abortOnError, String msgOnError,
-                                String asyncId, Map<String, String> requestMap, Set<String> okayExceptions) {
-    //Processes all shard responses
-    ShardResponse srsp;
-    do {
-      srsp = shardHandler.takeCompletedOrError();
-      if (srsp != null) {
-        processResponse(results, srsp, okayExceptions);
-        Throwable exception = srsp.getException();
-        if (abortOnError && exception != null)  {
-          // drain pending requests
-          while (srsp != null)  {
-            srsp = shardHandler.takeCompletedOrError();
-          }
-          throw new SolrException(ErrorCode.SERVER_ERROR, msgOnError, exception);
-        }
-      }
-    } while (srsp != null);
-
-    //If request is async wait for the core admin to complete before returning
-    if (asyncId != null) {
-      waitForAsyncCallsToComplete(requestMap, results);
-      requestMap.clear();
-    }
-  }
-
-
-  void validateConfigOrThrowSolrException(String configName) throws IOException, KeeperException, InterruptedException {
-    boolean isValid = cloudManager.getDistribStateManager().hasData(ZkConfigManager.CONFIGS_ZKNODE + "/" + configName);
-    if(!isValid) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Can not find the specified config set: " + configName);
-    }
-  }
-
-  /**
-   * This doesn't validate the config (path) itself and is just responsible for creating the confNode.
-   * That check should be done before the config node is created.
-   */
-  public static void createConfNode(DistribStateManager stateManager, String configName, String coll, boolean isLegacyCloud) throws IOException, AlreadyExistsException, BadVersionException, KeeperException, InterruptedException {
-    
-    if (configName != null) {
-      String collDir = ZkStateReader.COLLECTIONS_ZKNODE + "/" + coll;
-      log.debug("creating collections conf node {} ", collDir);
-      byte[] data = Utils.toJSON(makeMap(ZkController.CONFIGNAME_PROP, configName));
-      if (stateManager.hasData(collDir)) {
-        stateManager.setData(collDir, data, -1);
-      } else {
-        stateManager.makePath(collDir, data, CreateMode.PERSISTENT, false);
-      }
-    } else {
-      if(isLegacyCloud){
-        log.warn("Could not obtain config name");
-      } else {
-        throw new SolrException(ErrorCode.BAD_REQUEST,"Unable to get config name");
-      }
-    }
-  }
-  
-  private void collectionCmd(ZkNodeProps message, ModifiableSolrParams params,
-                             NamedList results, Replica.State stateMatcher, String asyncId, Map<String, String> requestMap) {
-    collectionCmd( message, params, results, stateMatcher, asyncId, requestMap, Collections.emptySet());
-  }
-
-
-  void collectionCmd(ZkNodeProps message, ModifiableSolrParams params,
-                     NamedList results, Replica.State stateMatcher, String asyncId, Map<String, String> requestMap, Set<String> okayExceptions) {
-    log.info("Executing Collection Cmd : " + params);
-    String collectionName = message.getStr(NAME);
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-
-    ClusterState clusterState = zkStateReader.getClusterState();
-    DocCollection coll = clusterState.getCollection(collectionName);
-    
-    for (Slice slice : coll.getSlices()) {
-      sliceCmd(clusterState, params, stateMatcher, slice, shardHandler, asyncId, requestMap);
-    }
-
-    processResponses(results, shardHandler, false, null, asyncId, requestMap, okayExceptions);
-
-  }
-
-  void sliceCmd(ClusterState clusterState, ModifiableSolrParams params, Replica.State stateMatcher,
-                Slice slice, ShardHandler shardHandler, String asyncId, Map<String, String> requestMap) {
-
-    for (Replica replica : slice.getReplicas()) {
-      if (clusterState.liveNodesContain(replica.getStr(ZkStateReader.NODE_NAME_PROP))
-          && (stateMatcher == null || Replica.State.getState(replica.getStr(ZkStateReader.STATE_PROP)) == stateMatcher)) {
-
-        // For thread safety, only simple clone the ModifiableSolrParams
-        ModifiableSolrParams cloneParams = new ModifiableSolrParams();
-        cloneParams.add(params);
-        cloneParams.set(CoreAdminParams.CORE, replica.getStr(ZkStateReader.CORE_NAME_PROP));
-
-        sendShardRequest(replica.getStr(ZkStateReader.NODE_NAME_PROP), cloneParams, shardHandler, asyncId, requestMap);
-      }
-    }
-  }
-  
-  private void processResponse(NamedList results, ShardResponse srsp, Set<String> okayExceptions) {
-    Throwable e = srsp.getException();
-    String nodeName = srsp.getNodeName();
-    SolrResponse solrResponse = srsp.getSolrResponse();
-    String shard = srsp.getShard();
-
-    processResponse(results, e, nodeName, solrResponse, shard, okayExceptions);
-  }
-
-  @SuppressWarnings("unchecked")
-  private void processResponse(NamedList results, Throwable e, String nodeName, SolrResponse solrResponse, String shard, Set<String> okayExceptions) {
-    String rootThrowable = null;
-    if (e instanceof RemoteSolrException) {
-      rootThrowable = ((RemoteSolrException) e).getRootThrowable();
-    }
-
-    if (e != null && (rootThrowable == null || !okayExceptions.contains(rootThrowable))) {
-      log.error("Error from shard: " + shard, e);
-
-      SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
-      if (failure == null) {
-        failure = new SimpleOrderedMap();
-        results.add("failure", failure);
-      }
-
-      failure.add(nodeName, e.getClass().getName() + ":" + e.getMessage());
-
-    } else {
-
-      SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
-      if (success == null) {
-        success = new SimpleOrderedMap();
-        results.add("success", success);
-      }
-
-      success.add(nodeName, solrResponse.getResponse());
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private void waitForAsyncCallsToComplete(Map<String, String> requestMap, NamedList results) {
-    for (String k:requestMap.keySet()) {
-      log.debug("I am Waiting for :{}/{}", k, requestMap.get(k));
-      results.add(requestMap.get(k), waitForCoreAdminAsyncCallToComplete(k, requestMap.get(k)));
-    }
-  }
-
-  private NamedList waitForCoreAdminAsyncCallToComplete(String nodeName, String requestId) {
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminAction.REQUESTSTATUS.toString());
-    params.set(CoreAdminParams.REQUESTID, requestId);
-    int counter = 0;
-    ShardRequest sreq;
-    do {
-      sreq = new ShardRequest();
-      params.set("qt", adminPath);
-      sreq.purpose = 1;
-      String replica = zkStateReader.getBaseUrlForNodeName(nodeName);
-      sreq.shards = new String[] {replica};
-      sreq.actualShards = sreq.shards;
-      sreq.params = params;
-
-      shardHandler.submit(sreq, replica, sreq.params);
-
-      ShardResponse srsp;
-      do {
-        srsp = shardHandler.takeCompletedOrError();
-        if (srsp != null) {
-          NamedList results = new NamedList();
-          processResponse(results, srsp, Collections.emptySet());
-          if (srsp.getSolrResponse().getResponse() == null) {
-            NamedList response = new NamedList();
-            response.add("STATUS", "failed");
-            return response;
-          }
-          
-          String r = (String) srsp.getSolrResponse().getResponse().get("STATUS");
-          if (r.equals("running")) {
-            log.debug("The task is still RUNNING, continuing to wait.");
-            try {
-              Thread.sleep(1000);
-            } catch (InterruptedException e) {
-              Thread.currentThread().interrupt();
-            }
-            continue;
-
-          } else if (r.equals("completed")) {
-            log.debug("The task is COMPLETED, returning");
-            return srsp.getSolrResponse().getResponse();
-          } else if (r.equals("failed")) {
-            // TODO: Improve this. Get more information.
-            log.debug("The task is FAILED, returning");
-            return srsp.getSolrResponse().getResponse();
-          } else if (r.equals("notfound")) {
-            log.debug("The task is notfound, retry");
-            if (counter++ < 5) {
-              try {
-                Thread.sleep(1000);
-              } catch (InterruptedException e) {
-              }
-              break;
-            }
-            throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid status request for requestId: " + requestId + "" + srsp.getSolrResponse().getResponse().get("STATUS") +
-                "retried " + counter + "times");
-          } else {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid status request " + srsp.getSolrResponse().getResponse().get("STATUS"));
-          }
-        }
-      } while (srsp != null);
-    } while(true);
-  }
-
-  @Override
-  public String getName() {
-    return "Overseer Collection Message Handler";
-  }
-
-  @Override
-  public String getTimerName(String operation) {
-    return "collection_" + operation;
-  }
-
-  @Override
-  public String getTaskKey(ZkNodeProps message) {
-    return message.containsKey(COLLECTION_PROP) ?
-      message.getStr(COLLECTION_PROP) : message.getStr(NAME);
-  }
-
-
-  private long sessionId = -1;
-  private LockTree.Session lockSession;
-
-  @Override
-  public Lock lockTask(ZkNodeProps message, OverseerTaskProcessor.TaskBatch taskBatch) {
-    if (lockSession == null || sessionId != taskBatch.getId()) {
-      //this is always called in the same thread.
-      //Each batch is supposed to have a new taskBatch
-      //So if taskBatch changes we must create a new Session
-      // also check if the running tasks are empty. If yes, clear lockTree
-      // this will ensure that locks are not 'leaked'
-      if(taskBatch.getRunningTasks() == 0) lockTree.clear();
-      lockSession = lockTree.getSession();
-    }
-    return lockSession.lock(getCollectionAction(message.getStr(Overseer.QUEUE_OPERATION)),
-        Arrays.asList(
-            getTaskKey(message),
-            message.getStr(ZkStateReader.SHARD_ID_PROP),
-            message.getStr(ZkStateReader.REPLICA_PROP))
-
-    );
-  }
-
-
-  @Override
-  public void close() throws IOException {
-    this.isClosed = true;
-    if (tpe != null) {
-      if (!tpe.isShutdown()) {
-        ExecutorUtil.shutdownAndAwaitTermination(tpe);
-      }
-    }
-  }
-
-  @Override
-  public boolean isClosed() {
-    return isClosed;
-  }
-
-  interface Cmd {
-    void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/OverseerRoleCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerRoleCmd.java b/solr/core/src/java/org/apache/solr/cloud/OverseerRoleCmd.java
deleted file mode 100644
index 0f450bd..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerRoleCmd.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.REMOVEROLE;
-
-public class OverseerRoleCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-  private final CollectionAction operation;
-  private final OverseerNodePrioritizer overseerPrioritizer;
-
-
-
-  public OverseerRoleCmd(OverseerCollectionMessageHandler ocmh, CollectionAction operation, OverseerNodePrioritizer prioritizer) {
-    this.ocmh = ocmh;
-    this.operation = operation;
-    this.overseerPrioritizer = prioritizer;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    Map roles = null;
-    String node = message.getStr("node");
-
-    String roleName = message.getStr("role");
-    boolean nodeExists = false;
-    if (nodeExists = zkClient.exists(ZkStateReader.ROLES, true)) {
-      roles = (Map) Utils.fromJSON(zkClient.getData(ZkStateReader.ROLES, null, new Stat(), true));
-    } else {
-      roles = new LinkedHashMap(1);
-    }
-
-    List nodeList = (List) roles.get(roleName);
-    if (nodeList == null) roles.put(roleName, nodeList = new ArrayList());
-    if (ADDROLE == operation) {
-      log.info("Overseer role added to {}", node);
-      if (!nodeList.contains(node)) nodeList.add(node);
-    } else if (REMOVEROLE == operation) {
-      log.info("Overseer role removed from {}", node);
-      nodeList.remove(node);
-    }
-
-    if (nodeExists) {
-      zkClient.setData(ZkStateReader.ROLES, Utils.toJSON(roles), true);
-    } else {
-      zkClient.create(ZkStateReader.ROLES, Utils.toJSON(roles), CreateMode.PERSISTENT, true);
-    }
-    // If there are too many nodes this command may time out, and dedicated overseers are most
-    // likely created when there are many nodes. So, do this operation in a separate thread.
-    new Thread(() -> {
-      try {
-        overseerPrioritizer.prioritizeOverseerNodes(ocmh.myId);
-      } catch (Exception e) {
-        log.error("Error in prioritizing Overseer", e);
-      }
-
-    }).start();
-
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/OverseerStatusCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerStatusCmd.java b/solr/core/src/java/org/apache/solr/cloud/OverseerStatusCmd.java
deleted file mode 100644
index aba4872..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerStatusCmd.java
+++ /dev/null
@@ -1,112 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import com.codahale.metrics.Timer;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.util.stats.MetricUtils;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class OverseerStatusCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public OverseerStatusCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    String leaderNode = OverseerTaskProcessor.getLeaderNode(zkStateReader.getZkClient());
-    results.add("leader", leaderNode);
-    Stat stat = new Stat();
-    zkStateReader.getZkClient().getData("/overseer/queue",null, stat, true);
-    results.add("overseer_queue_size", stat.getNumChildren());
-    stat = new Stat();
-    zkStateReader.getZkClient().getData("/overseer/queue-work",null, stat, true);
-    results.add("overseer_work_queue_size", stat.getNumChildren());
-    stat = new Stat();
-    zkStateReader.getZkClient().getData("/overseer/collection-queue-work",null, stat, true);
-    results.add("overseer_collection_queue_size", stat.getNumChildren());
-
-    NamedList overseerStats = new NamedList();
-    NamedList collectionStats = new NamedList();
-    NamedList stateUpdateQueueStats = new NamedList();
-    NamedList workQueueStats = new NamedList();
-    NamedList collectionQueueStats = new NamedList();
-    Stats stats = ocmh.stats;
-    for (Map.Entry<String, Stats.Stat> entry : stats.getStats().entrySet()) {
-      String key = entry.getKey();
-      NamedList<Object> lst = new SimpleOrderedMap<>();
-      if (key.startsWith("collection_"))  {
-        collectionStats.add(key.substring(11), lst);
-        int successes = stats.getSuccessCount(entry.getKey());
-        int errors = stats.getErrorCount(entry.getKey());
-        lst.add("requests", successes);
-        lst.add("errors", errors);
-        List<Stats.FailedOp> failureDetails = stats.getFailureDetails(key);
-        if (failureDetails != null) {
-          List<SimpleOrderedMap<Object>> failures = new ArrayList<>();
-          for (Stats.FailedOp failedOp : failureDetails) {
-            SimpleOrderedMap<Object> fail = new SimpleOrderedMap<>();
-            fail.add("request", failedOp.req.getProperties());
-            fail.add("response", failedOp.resp.getResponse());
-            failures.add(fail);
-          }
-          lst.add("recent_failures", failures);
-        }
-      } else if (key.startsWith("/overseer/queue_"))  {
-        stateUpdateQueueStats.add(key.substring(16), lst);
-      } else if (key.startsWith("/overseer/queue-work_"))  {
-        workQueueStats.add(key.substring(21), lst);
-      } else if (key.startsWith("/overseer/collection-queue-work_"))  {
-        collectionQueueStats.add(key.substring(32), lst);
-      } else  {
-        // overseer stats
-        overseerStats.add(key, lst);
-        int successes = stats.getSuccessCount(entry.getKey());
-        int errors = stats.getErrorCount(entry.getKey());
-        lst.add("requests", successes);
-        lst.add("errors", errors);
-      }
-      Timer timer = entry.getValue().requestTime;
-      MetricUtils.addMetrics(lst, timer);
-    }
-    results.add("overseer_operations", overseerStats);
-    results.add("collection_operations", collectionStats);
-    results.add("overseer_queue", stateUpdateQueueStats);
-    results.add("overseer_internal_queue", workQueueStats);
-    results.add("collection_queue", collectionQueueStats);
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
deleted file mode 100644
index e903091..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CollectionStateWatcher;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public ReplaceNodeCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    String source = message.getStr(CollectionParams.SOURCE_NODE, message.getStr("source"));
-    String target = message.getStr(CollectionParams.TARGET_NODE, message.getStr("target"));
-    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
-    if (source == null || target == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "sourceNode and targetNode are required params" );
-    }
-    String async = message.getStr("async");
-    int timeout = message.getInt("timeout", 10 * 60); // 10 minutes
-    boolean parallel = message.getBool("parallel", false);
-    ClusterState clusterState = zkStateReader.getClusterState();
-
-    if (!clusterState.liveNodesContain(source)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + source + " is not live");
-    }
-    if (!clusterState.liveNodesContain(target)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target Node: " + target + " is not live");
-    }
-    List<ZkNodeProps> sourceReplicas = getReplicasOfNode(source, clusterState);
-    // how many leaders are we moving? for these replicas we have to make sure that either:
-    // * another existing replica can become a leader, or
-    // * we wait until the newly created replica completes recovery (and can become the new leader)
-    // If waitForFinalState=true we wait for all replicas
-    int numLeaders = 0;
-    for (ZkNodeProps props : sourceReplicas) {
-      if (props.getBool(ZkStateReader.LEADER_PROP, false) || waitForFinalState) {
-        numLeaders++;
-      }
-    }
-    // map of collectionName_coreNodeName to watchers
-    Map<String, CollectionStateWatcher> watchers = new HashMap<>();
-    List<ZkNodeProps> createdReplicas = new ArrayList<>();
-
-    AtomicBoolean anyOneFailed = new AtomicBoolean(false);
-    SolrCloseableLatch countDownLatch = new SolrCloseableLatch(sourceReplicas.size(), ocmh);
-
-    SolrCloseableLatch replicasToRecover = new SolrCloseableLatch(numLeaders, ocmh);
-
-    for (ZkNodeProps sourceReplica : sourceReplicas) {
-      NamedList nl = new NamedList();
-      log.info("Going to create replica for collection={} shard={} on node={}", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
-      ZkNodeProps msg = sourceReplica.plus("parallel", String.valueOf(parallel)).plus(CoreAdminParams.NODE, target);
-      if (async != null) msg.getProperties().put(ASYNC, async);
-      final ZkNodeProps addedReplica = ocmh.addReplica(clusterState,
-          msg, nl, () -> {
-            countDownLatch.countDown();
-            if (nl.get("failure") != null) {
-              String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-                  " on node=%s", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
-              log.warn(errorString);
-              // one replica creation failed. Make the best attempt to
-              // delete all the replicas created so far in the target
-              // and exit
-              synchronized (results) {
-                results.add("failure", errorString);
-                anyOneFailed.set(true);
-              }
-            } else {
-              log.debug("Successfully created replica for collection={} shard={} on node={}",
-                  sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
-            }
-          });
-
-      if (addedReplica != null) {
-        createdReplicas.add(addedReplica);
-        if (sourceReplica.getBool(ZkStateReader.LEADER_PROP, false) || waitForFinalState) {
-          String shardName = sourceReplica.getStr(SHARD_ID_PROP);
-          String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
-          String collectionName = sourceReplica.getStr(COLLECTION_PROP);
-          String key = collectionName + "_" + replicaName;
-          CollectionStateWatcher watcher;
-          if (waitForFinalState) {
-            watcher = new ActiveReplicaWatcher(collectionName, null,
-                Collections.singletonList(addedReplica.getStr(ZkStateReader.CORE_NAME_PROP)), replicasToRecover);
-          } else {
-            watcher = new LeaderRecoveryWatcher(collectionName, shardName, replicaName,
-                addedReplica.getStr(ZkStateReader.CORE_NAME_PROP), replicasToRecover);
-          }
-          watchers.put(key, watcher);
-          log.debug("--- adding " + key + ", " + watcher);
-          zkStateReader.registerCollectionStateWatcher(collectionName, watcher);
-        } else {
-          log.debug("--- not waiting for " + addedReplica);
-        }
-      }
-    }
-
-    log.debug("Waiting for replicas to be added");
-    if (!countDownLatch.await(timeout, TimeUnit.SECONDS)) {
-      log.info("Timed out waiting for replicas to be added");
-      anyOneFailed.set(true);
-    } else {
-      log.debug("Finished waiting for replicas to be added");
-    }
-
-    // now wait for leader replicas to recover
-    log.debug("Waiting for " + numLeaders + " leader replicas to recover");
-    if (!replicasToRecover.await(timeout, TimeUnit.SECONDS)) {
-      log.info("Timed out waiting for " + replicasToRecover.getCount() + " leader replicas to recover");
-      anyOneFailed.set(true);
-    } else {
-      log.debug("Finished waiting for leader replicas to recover");
-    }
-    // remove the watchers, we're done either way
-    for (Map.Entry<String, CollectionStateWatcher> e : watchers.entrySet()) {
-      zkStateReader.removeCollectionStateWatcher(e.getKey(), e.getValue());
-    }
-    if (anyOneFailed.get()) {
-      log.info("Failed to create some replicas. Cleaning up all replicas on target node");
-      SolrCloseableLatch cleanupLatch = new SolrCloseableLatch(createdReplicas.size(), ocmh);
-      for (ZkNodeProps createdReplica : createdReplicas) {
-        NamedList deleteResult = new NamedList();
-        try {
-          ocmh.deleteReplica(zkStateReader.getClusterState(), createdReplica.plus("parallel", "true"), deleteResult, () -> {
-            cleanupLatch.countDown();
-            if (deleteResult.get("failure") != null) {
-              synchronized (results) {
-                results.add("failure", "Could not cleanup, because of : " + deleteResult.get("failure"));
-              }
-            }
-          });
-        } catch (KeeperException e) {
-          cleanupLatch.countDown();
-          log.warn("Error deleting replica ", e);
-        } catch (Exception e) {
-          log.warn("Error deleting replica ", e);
-          cleanupLatch.countDown();
-          throw e;
-        }
-      }
-      cleanupLatch.await(5, TimeUnit.MINUTES);
-      return;
-    }
-
-
-    // reaching this point means all replicas were recreated,
-    // so now clean up the replicas on the source node
-    DeleteNodeCmd.cleanupReplicas(results, state, sourceReplicas, ocmh, source, async);
-    results.add("success", "REPLACENODE action completed successfully from : " + source + " to : " + target);
-  }
-
-  static List<ZkNodeProps> getReplicasOfNode(String source, ClusterState state) {
-    List<ZkNodeProps> sourceReplicas = new ArrayList<>();
-    for (Map.Entry<String, DocCollection> e : state.getCollectionsMap().entrySet()) {
-      for (Slice slice : e.getValue().getSlices()) {
-        for (Replica replica : slice.getReplicas()) {
-          if (source.equals(replica.getNodeName())) {
-            ZkNodeProps props = new ZkNodeProps(
-                COLLECTION_PROP, e.getKey(),
-                SHARD_ID_PROP, slice.getName(),
-                ZkStateReader.CORE_NAME_PROP, replica.getCoreName(),
-                ZkStateReader.REPLICA_PROP, replica.getName(),
-                ZkStateReader.REPLICA_TYPE, replica.getType().name(),
-                ZkStateReader.LEADER_PROP, String.valueOf(replica.equals(slice.getLeader())),
-                CoreAdminParams.NODE, source);
-            sourceReplicas.add(props);
-          }
-        }
-      }
-    }
-    return sourceReplicas;
-  }
-
-}
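
The command above is reached through the Collections API's REPLACENODE action; the parameter names it reads from the message (sourceNode, targetNode, parallel, async) appear in the handler code. Below is a minimal SolrJ sketch of triggering it, assuming a local cluster at localhost:9983 and illustrative node names; this is an editor's sketch, not code from the commit.

    import java.util.Collections;
    import java.util.Optional;

    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class ReplaceNodeExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.set("action", "REPLACENODE");
          params.set("sourceNode", "host1:8983_solr"); // node being drained (illustrative)
          params.set("targetNode", "host2:8983_solr"); // node receiving the replicas (illustrative)
          params.set("parallel", "true");              // same flag ReplaceNodeCmd reads above
          // The Overseer routes this Collections API request to ReplaceNodeCmd.
          client.request(new GenericSolrRequest(SolrRequest.METHOD.POST,
              "/admin/collections", params));
        }
      }
    }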

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
deleted file mode 100644
index 9c9a5c9..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.Properties;
-import java.util.Set;
-
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.backup.BackupManager;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROPS;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.RANDOM;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
-import static org.apache.solr.common.cloud.DocCollection.STATE_FORMAT;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public RestoreCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    // TODO maybe we can inherit createCollection's options/code
-
-    String restoreCollectionName = message.getStr(COLLECTION_PROP);
-    String backupName = message.getStr(NAME); // of backup
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    String asyncId = message.getStr(ASYNC);
-    String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
-    Map<String, String> requestMap = new HashMap<>();
-
-    CoreContainer cc = ocmh.overseer.getZkController().getCoreContainer();
-    BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
-
-    URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
-    URI backupPath = repository.resolve(location, backupName);
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    BackupManager backupMgr = new BackupManager(repository, zkStateReader);
-
-    Properties properties = backupMgr.readBackupProperties(location, backupName);
-    String backupCollection = properties.getProperty(BackupManager.COLLECTION_NAME_PROP);
-    DocCollection backupCollectionState = backupMgr.readCollectionState(location, backupName, backupCollection);
-
-    // Get the Solr nodes to restore a collection.
-    final List<String> nodeList = Assign.getLiveOrLiveAndCreateNodeSetList(
-        zkStateReader.getClusterState().getLiveNodes(), message, RANDOM);
-
-    int numShards = backupCollectionState.getActiveSlices().size();
-    
-    int numNrtReplicas = getInt(message, NRT_REPLICAS, backupCollectionState.getNumNrtReplicas(), 0);
-    if (numNrtReplicas == 0) {
-      numNrtReplicas = getInt(message, REPLICATION_FACTOR, backupCollectionState.getReplicationFactor(), 0);
-    }
-    int numTlogReplicas = getInt(message, TLOG_REPLICAS, backupCollectionState.getNumTlogReplicas(), 0);
-    int numPullReplicas = getInt(message, PULL_REPLICAS, backupCollectionState.getNumPullReplicas(), 0);
-    int totalReplicasPerShard = numNrtReplicas + numTlogReplicas + numPullReplicas;
-    
-    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, backupCollectionState.getMaxShardsPerNode());
-    int availableNodeCount = nodeList.size();
-    if ((numShards * totalReplicasPerShard) > (availableNodeCount * maxShardsPerNode)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          String.format(Locale.ROOT, "Solr cloud with %d available nodes is insufficient for"
-              + " restoring a collection with %d shards, %d total replicas per shard and maxShardsPerNode %d."
-              + " Consider increasing the maxShardsPerNode value OR the number of available nodes.",
-              availableNodeCount, numShards, totalReplicasPerShard, maxShardsPerNode));
-    }
-
-    //Upload the configs
-    String configName = (String) properties.get(COLL_CONF);
-    String restoreConfigName = message.getStr(COLL_CONF, configName);
-    if (zkStateReader.getConfigManager().configExists(restoreConfigName)) {
-      log.info("Using existing config {}", restoreConfigName);
-      //TODO add overwrite option?
-    } else {
-      log.info("Uploading config {}", restoreConfigName);
-      backupMgr.uploadConfigDir(location, backupName, configName, restoreConfigName);
-    }
-
-    log.info("Starting restore into collection={} with backup_name={} at location={}", restoreCollectionName, backupName,
-        location);
-
-    //Create core-less collection
-    {
-      Map<String, Object> propMap = new HashMap<>();
-      propMap.put(Overseer.QUEUE_OPERATION, CREATE.toString());
-      propMap.put("fromApi", "true"); // mostly true.  Prevents autoCreated=true in the collection state.
-      if (properties.get(STATE_FORMAT) == null) {
-        propMap.put(STATE_FORMAT, "2");
-      }
-
-      // inherit settings from input API, defaulting to the backup's setting.  Ex: replicationFactor
-      for (String collProp : COLL_PROPS.keySet()) {
-        Object val = message.getProperties().getOrDefault(collProp, backupCollectionState.get(collProp));
-        if (val != null) {
-          propMap.put(collProp, val);
-        }
-      }
-
-      propMap.put(NAME, restoreCollectionName);
-      propMap.put(CREATE_NODE_SET, CREATE_NODE_SET_EMPTY); //no cores
-      propMap.put(COLL_CONF, restoreConfigName);
-
-      // router.*
-      @SuppressWarnings("unchecked")
-      Map<String, Object> routerProps = (Map<String, Object>) backupCollectionState.getProperties().get(DocCollection.DOC_ROUTER);
-      for (Map.Entry<String, Object> pair : routerProps.entrySet()) {
-        propMap.put(DocCollection.DOC_ROUTER + "." + pair.getKey(), pair.getValue());
-      }
-
-      Set<String> sliceNames = backupCollectionState.getActiveSlicesMap().keySet();
-      if (backupCollectionState.getRouter() instanceof ImplicitDocRouter) {
-        propMap.put(SHARDS_PROP, StrUtils.join(sliceNames, ','));
-      } else {
-        propMap.put(NUM_SLICES, sliceNames.size());
-        // ClusterStateMutator.createCollection detects that "slices" is in fact a slice structure instead of a
-        //   list of names, and if so uses this instead of building it.  We clear the replica list.
-        Collection<Slice> backupSlices = backupCollectionState.getActiveSlices();
-        Map<String, Slice> newSlices = new LinkedHashMap<>(backupSlices.size());
-        for (Slice backupSlice : backupSlices) {
-          newSlices.put(backupSlice.getName(),
-              new Slice(backupSlice.getName(), Collections.emptyMap(), backupSlice.getProperties()));
-        }
-        propMap.put(SHARDS_PROP, newSlices);
-      }
-
-      ocmh.commandMap.get(CREATE).call(zkStateReader.getClusterState(), new ZkNodeProps(propMap), new NamedList());
-      // note: when createCollection() returns, the collection exists (no race)
-    }
-
-    DocCollection restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
-
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-
-    //Mark all shards in CONSTRUCTION STATE while we restore the data
-    {
-      //TODO might instead createCollection accept an initial state?  Is there a race?
-      Map<String, Object> propMap = new HashMap<>();
-      propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-      for (Slice shard : restoreCollection.getSlices()) {
-        propMap.put(shard.getName(), Slice.State.CONSTRUCTION.toString());
-      }
-      propMap.put(ZkStateReader.COLLECTION_PROP, restoreCollectionName);
-      inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
-    }
-
-    // TODO how do we leverage the RULE / SNITCH logic in createCollection?
-
-    ClusterState clusterState = zkStateReader.getClusterState();
-
-    List<String> sliceNames = new ArrayList<>();
-    restoreCollection.getSlices().forEach(x -> sliceNames.add(x.getName()));
-    PolicyHelper.SessionWrapper sessionWrapper = null;
-
-    try {
-      List<ReplicaPosition> replicaPositions = Assign.identifyNodes(
-          ocmh.cloudManager, clusterState,
-          nodeList, restoreCollectionName,
-          message, sliceNames,
-          numNrtReplicas, numTlogReplicas, numPullReplicas);
-      sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
-      //Create one replica per shard and copy backed up data to it
-      for (Slice slice : restoreCollection.getSlices()) {
-        log.debug("Adding replica for shard={} collection={} ", slice.getName(), restoreCollection);
-        HashMap<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD);
-        propMap.put(COLLECTION_PROP, restoreCollectionName);
-        propMap.put(SHARD_ID_PROP, slice.getName());
-
-        if (numNrtReplicas >= 1) {
-          propMap.put(REPLICA_TYPE, Replica.Type.NRT.name());
-        } else if (numTlogReplicas >= 1) {
-          propMap.put(REPLICA_TYPE, Replica.Type.TLOG.name());
-        } else {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Unexpected number of replicas, replicationFactor, " +
-              Replica.Type.NRT + " or " + Replica.Type.TLOG + " must be greater than 0");
-        }
-
-        // Pick the first replica position assigned to this shard and restore onto that node
-        String node;
-        for (ReplicaPosition replicaPosition : replicaPositions) {
-          if (Objects.equals(replicaPosition.shard, slice.getName())) {
-            node = replicaPosition.node;
-            propMap.put(CoreAdminParams.NODE, node);
-            replicaPositions.remove(replicaPosition);
-            break;
-          }
-        }
-
-        // add async param
-        if (asyncId != null) {
-          propMap.put(ASYNC, asyncId);
-        }
-        ocmh.addPropertyParams(message, propMap);
-
-        ocmh.addReplica(clusterState, new ZkNodeProps(propMap), new NamedList(), null);
-      }
-
-      //refresh the local copy of collection state
-      restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
-
-      //Copy data from backed up index to each replica
-      for (Slice slice : restoreCollection.getSlices()) {
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.RESTORECORE.toString());
-        params.set(NAME, "snapshot." + slice.getName());
-        params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString());
-        params.set(CoreAdminParams.BACKUP_REPOSITORY, repo);
-
-        ocmh.sliceCmd(clusterState, params, null, slice, shardHandler, asyncId, requestMap);
-      }
-      ocmh.processResponses(new NamedList(), shardHandler, true, "Could not restore core", asyncId, requestMap);
-
-      //Mark all shards in ACTIVE STATE
-      {
-        HashMap<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-        propMap.put(ZkStateReader.COLLECTION_PROP, restoreCollectionName);
-        for (Slice shard : restoreCollection.getSlices()) {
-          propMap.put(shard.getName(), Slice.State.ACTIVE.toString());
-        }
-        inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
-      }
-
-      //refresh the local copy of collection state
-      restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
-
-      if (totalReplicasPerShard > 1) {
-        log.info("Adding replicas to restored collection={}", restoreCollection);
-        for (Slice slice : restoreCollection.getSlices()) {
-
-          //Add the remaining replicas for each shard, considering its type
-          int createdNrtReplicas = 0, createdTlogReplicas = 0, createdPullReplicas = 0;
-
-          // We already created either an NRT or a TLOG replica as leader
-          if (numNrtReplicas > 0) {
-            createdNrtReplicas++;
-          } else if (numTlogReplicas > 0) {
-            createdTlogReplicas++;
-          }
-
-          for (int i = 1; i < totalReplicasPerShard; i++) {
-            Replica.Type typeToCreate;
-            if (createdNrtReplicas < numNrtReplicas) {
-              createdNrtReplicas++;
-              typeToCreate = Replica.Type.NRT;
-            } else if (createdTlogReplicas < numTlogReplicas) {
-              createdTlogReplicas++;
-              typeToCreate = Replica.Type.TLOG;
-            } else {
-              createdPullReplicas++;
-              typeToCreate = Replica.Type.PULL;
-              assert createdPullReplicas <= numPullReplicas: "Unexpected number of replicas";
-            }
-
-            log.debug("Adding replica for shard={} collection={} of type {} ", slice.getName(), restoreCollection, typeToCreate);
-            HashMap<String, Object> propMap = new HashMap<>();
-            propMap.put(COLLECTION_PROP, restoreCollectionName);
-            propMap.put(SHARD_ID_PROP, slice.getName());
-            propMap.put(REPLICA_TYPE, typeToCreate.name());
-
-            // Pick the first replica position assigned to this shard and restore onto that node
-            String node;
-            for (ReplicaPosition replicaPosition : replicaPositions) {
-              if (Objects.equals(replicaPosition.shard, slice.getName())) {
-                node = replicaPosition.node;
-                propMap.put(CoreAdminParams.NODE, node);
-                replicaPositions.remove(replicaPosition);
-                break;
-              }
-            }
-
-            // add async param
-            if (asyncId != null) {
-              propMap.put(ASYNC, asyncId);
-            }
-            ocmh.addPropertyParams(message, propMap);
-
-            ocmh.addReplica(zkStateReader.getClusterState(), new ZkNodeProps(propMap), results, null);
-          }
-        }
-      }
-
-      log.info("Completed restoring collection={} backupName={}", restoreCollection, backupName);
-    } finally {
-      if (sessionWrapper != null) sessionWrapper.release();
-    }
-  }
-
-  private int getInt(ZkNodeProps message, String propertyName, Integer count, int defaultValue) {
-    Integer value = message.getInt(propertyName, count);
-    return value != null ? value : defaultValue;
-  }
-}
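
RestoreCmd above is the Overseer-side handler for the Collections API RESTORE action. A minimal SolrJ sketch of issuing that action follows; the collection name, backup name, and location are assumptions for illustration, not values from the commit.

    import java.util.Collections;
    import java.util.Optional;

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class RestoreExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
          CollectionAdminRequest.Restore restore =
              CollectionAdminRequest.restoreCollection("restoredCollection", "myBackupName");
          restore.setLocation("/backups"); // surfaces as CoreAdminParams.BACKUP_LOCATION in the message
          restore.process(client);         // handled by RestoreCmd via the Overseer
        }
      }
    }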


[03/41] lucene-solr:jira/solr-11702: Add 7.2.1 back compat test indexes

Posted by da...@apache.org.
Add 7.2.1 back compat test indexes


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f7f0ed1f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f7f0ed1f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f7f0ed1f

Branch: refs/heads/jira/solr-11702
Commit: f7f0ed1fc5ce7e23fc158ad3d3e3c541acfc37b3
Parents: b6f6519
Author: Jim Ferenczi <ji...@apache.org>
Authored: Mon Jan 15 10:57:22 2018 +0100
Committer: Jim Ferenczi <ji...@apache.org>
Committed: Mon Jan 15 10:57:22 2018 +0100

----------------------------------------------------------------------
 .../lucene/index/TestBackwardsCompatibility.java   |   7 +++++--
 .../org/apache/lucene/index/index.7.2.1-cfs.zip    | Bin 0 -> 15605 bytes
 .../org/apache/lucene/index/index.7.2.1-nocfs.zip  | Bin 0 -> 15606 bytes
 .../test/org/apache/lucene/index/sorted.7.2.1.zip  | Bin 0 -> 93388 bytes
 4 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f7f0ed1f/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index 98e7aaf..edeb0ee 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -293,7 +293,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     "7.1.0-cfs",
     "7.1.0-nocfs",
     "7.2.0-cfs",
-    "7.2.0-nocfs"
+    "7.2.0-nocfs",
+    "7.2.1-cfs",
+    "7.2.1-nocfs"
   };
 
   public static String[] getOldNames() {
@@ -304,7 +306,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     "sorted.7.0.0",
     "sorted.7.0.1",
     "sorted.7.1.0",
-    "sorted.7.2.0"
+    "sorted.7.2.0",
+    "sorted.7.2.1"
   };
 
   public static String[] getOldSortedNames() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f7f0ed1f/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.2.1-cfs.zip
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.2.1-cfs.zip b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.2.1-cfs.zip
new file mode 100644
index 0000000..e579dab
Binary files /dev/null and b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.2.1-cfs.zip differ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f7f0ed1f/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.2.1-nocfs.zip
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.2.1-nocfs.zip b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.2.1-nocfs.zip
new file mode 100644
index 0000000..68f14a4
Binary files /dev/null and b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.2.1-nocfs.zip differ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f7f0ed1f/lucene/backward-codecs/src/test/org/apache/lucene/index/sorted.7.2.1.zip
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/sorted.7.2.1.zip b/lucene/backward-codecs/src/test/org/apache/lucene/index/sorted.7.2.1.zip
new file mode 100644
index 0000000..80e676a
Binary files /dev/null and b/lucene/backward-codecs/src/test/org/apache/lucene/index/sorted.7.2.1.zip differ
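
TestBackwardsCompatibility unzips each of these archived indexes and verifies the current codebase can still read them. A standalone sketch of that core check, assuming the zip has been extracted to an illustrative path:

    import java.nio.file.Paths;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.store.FSDirectory;

    public class OpenOldIndex {
      public static void main(String[] args) throws Exception {
        // Path to an unzipped 7.2.1 test index; the location is an assumption.
        try (FSDirectory dir = FSDirectory.open(Paths.get("/tmp/index.7.2.1-cfs"));
             DirectoryReader reader = DirectoryReader.open(dir)) {
          System.out.println("maxDoc=" + reader.maxDoc()
              + " segments=" + reader.leaves().size());
        }
      }
    }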


[04/41] lucene-solr:jira/solr-11702: SOLR-11063: fixed test failure.

Posted by da...@apache.org.
SOLR-11063: fixed test failure.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e4438a2b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e4438a2b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e4438a2b

Branch: refs/heads/jira/solr-11702
Commit: e4438a2bb7c0a4c22d8bf65078691e6dec8f5ba0
Parents: f7f0ed1
Author: Noble Paul <no...@apache.org>
Authored: Tue Jan 16 00:24:55 2018 +1100
Committer: Noble Paul <no...@apache.org>
Committed: Tue Jan 16 00:24:55 2018 +1100

----------------------------------------------------------------------
 .../org/apache/solr/client/solrj/cloud/autoscaling/Policy.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e4438a2b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
index 15c63b8..4697572 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
@@ -201,7 +201,7 @@ public class Policy implements MapWriter {
     if (!getPolicies().equals(policy.getPolicies())) return false;
     if (!getClusterPolicy().equals(policy.getClusterPolicy())) return false;
     if (!getClusterPreferences().equals(policy.getClusterPreferences())) return false;
-    return params.equals(policy.params);
+    return true;
   }
 
   /*This stores the logical state of the system, given a policy and
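
The fix drops the params comparison from Policy.equals(). A general caution that applies whenever a field is removed from equality: any field excluded from equals() must also stay out of hashCode(), or objects that compare equal could hash differently. A hypothetical illustration of the contract (not Solr code):

    // Hypothetical class, for illustration only: if equals() ignores a field,
    // hashCode() must ignore it too, or equal objects land in different buckets.
    final class Config {
      final String name;     // participates in equality
      final String comment;  // deliberately ignored by equality

      Config(String name, String comment) {
        this.name = name;
        this.comment = comment;
      }

      @Override public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof Config)) return false;
        return name.equals(((Config) o).name); // comment intentionally not compared
      }

      @Override public int hashCode() {
        return name.hashCode(); // must not mix in comment
      }
    }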


[39/41] lucene-solr:jira/solr-11702: SOLR-11624: Autocreated configsets will not use .AUTOCREATED suffix

Posted by da...@apache.org.
SOLR-11624: Autocreated configsets will not use .AUTOCREATED suffix


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/183835ed
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/183835ed
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/183835ed

Branch: refs/heads/jira/solr-11702
Commit: 183835ed2485915006746e456d7124cb5d5d4abb
Parents: 4aeabe7
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Mon Jan 22 19:48:41 2018 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Mon Jan 22 19:48:41 2018 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                | 10 +++++
 .../api/collections/CreateCollectionCmd.java    | 21 +++++-----
 .../handler/admin/ConfigSetsHandlerApi.java     |  6 +++
 .../TimeRoutedAliasUpdateProcessorTest.java     | 40 ++++++++++++++------
 solr/solr-ref-guide/src/collections-api.adoc    |  2 +-
 5 files changed, 55 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183835ed/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 9d3cbcb..0291a5f 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -67,6 +67,10 @@ Upgrade Notes
   Previously, the cool down period was a fixed period started after actions for a trigger event finish
   executing. During the cool down period, triggers wo
 
+* SOLR-11624: Collections created without specifying a configset name use a copy of the _default configset since 7.0.
+  Before 7.3, the copied-over configset was named the same as the collection, but from 7.3 onwards it will be
+  named with an additional ".AUTOCREATED" suffix.
+
 New Features
 ----------------------
 * SOLR-11285: Simulation framework for autoscaling. (ab)
@@ -165,6 +169,12 @@ Other Changes
 
 * SOLR-11871: MoveReplicaSuggester should not suggest leader if other replicas are available (noble)
 
+* SOLR-11624: Collections created from the _default configset will now be associated with a configset named with
+  the suffix ".AUTOCREATED". For example, a new collection "mycollection", created without specifying a configset
+  name, will use the _default configset, and the associated configset will be named "mycollection.AUTOCREATED".
+  If this collection is deleted and re-created, the autocreated configset will be left behind and will be re-used
+  for the re-created collection (Ishan Chattopadhyaya, Abhishek Kumar Singh)
+
 ==================  7.2.1 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183835ed/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
index 4d9c971..d5ceb6a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
@@ -350,10 +350,13 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
       try {
         configNames = ocmh.zkStateReader.getZkClient().getChildren(ZkConfigManager.CONFIGS_ZKNODE, null, true);
         if (configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
-          if (!CollectionAdminParams.SYSTEM_COLL.equals(coll)) {
-            copyDefaultConfigSetTo(configNames, coll);
+          if (CollectionAdminParams.SYSTEM_COLL.equals(coll)) {
+            return coll;
+          } else {
+            String intendedConfigSetName = ConfigSetsHandlerApi.getSuffixedNameForAutoGeneratedConfigSet(coll);
+            copyDefaultConfigSetTo(configNames, intendedConfigSetName);
+            return intendedConfigSetName;
           }
-          return coll;
         } else if (configNames != null && configNames.size() == 1) {
           configName = configNames.get(0);
           // no config set named, but there is only 1 - use it
@@ -372,17 +375,11 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
   private void copyDefaultConfigSetTo(List<String> configNames, String targetConfig) {
     ZkConfigManager configManager = new ZkConfigManager(ocmh.zkStateReader.getZkClient());
 
-    // if a configset named coll exists, delete the configset so that _default can be copied over
+    // if a configset with this name already exists, re-use it
     if (configNames.contains(targetConfig)) {
       log.info("There exists a configset by the same name as the collection we're trying to create: " + targetConfig +
-          ", deleting it so that we can copy the _default configs over and create the collection.");
-      try {
-        configManager.deleteConfigDir(targetConfig);
-      } catch (Exception e) {
-        throw new SolrException(ErrorCode.INVALID_STATE, "Error while deleting configset: " + targetConfig, e);
-      }
-    } else {
-      log.info("Only _default config set found, using it.");
+          ", re-using it.");
+      return;
     }
     // Copy _default into targetConfig
     try {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183835ed/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandlerApi.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandlerApi.java b/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandlerApi.java
index 2028f67..1a5f6f3 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandlerApi.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandlerApi.java
@@ -32,10 +32,16 @@ import org.apache.solr.response.SolrQueryResponse;
 public class ConfigSetsHandlerApi extends BaseHandlerApiSupport {
 
   final public static String DEFAULT_CONFIGSET_NAME = "_default";
+  final public static String AUTOCREATED_CONFIGSET_SUFFIX = ".AUTOCREATED";
+
 
   final ConfigSetsHandler configSetHandler;
   static Collection<ApiCommand> apiCommands = createMapping();
 
+  public static String getSuffixedNameForAutoGeneratedConfigSet(String configName) {
+    return configName + AUTOCREATED_CONFIGSET_SUFFIX;
+  }
+
   private static Collection<ApiCommand> createMapping() {
     Map<ConfigSetMeta, ApiCommand> result = new EnumMap<>(ConfigSetMeta.class);
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183835ed/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java b/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java
index 102377d..f524fda 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java
@@ -36,6 +36,7 @@ import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.ConfigSetAdminRequest;
 import org.apache.solr.client.solrj.request.V2Request;
+import org.apache.solr.client.solrj.response.ConfigSetAdminResponse;
 import org.apache.solr.client.solrj.response.FieldStatsInfo;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.UpdateResponse;
@@ -78,14 +79,21 @@ public class TimeRoutedAliasUpdateProcessorTest extends SolrCloudTestCase {
 
   @Test
   public void test() throws Exception {
-    // First create a config using REST API.  To do this, we create a collection with the name of the eventual config.
-    // We configure it, and ultimately delete it the collection, leaving a config with the same name behind.
-    // Then when we create the "real" collections referencing this config.
-    CollectionAdminRequest.createCollection(configName, 1, 1).process(solrClient);
+
+    // First create a configSet.
+    // Then we create a collection with the name of the eventual config.
+    // We configure it, and ultimately delete the collection, leaving a modified configSet behind.
+    // Then we create the "real" collections referencing this modified configSet.
+    final ConfigSetAdminRequest.Create adminRequest = new ConfigSetAdminRequest.Create();
+    adminRequest.setConfigSetName(configName);
+    adminRequest.setBaseConfigSetName("_default");
+    ConfigSetAdminResponse adminResponse = adminRequest.process(solrClient);
+    assertEquals(0, adminResponse.getStatus());
+
+    CollectionAdminRequest.createCollection(configName, configName, 1, 1).process(solrClient);
     // manipulate the config...
-    checkNoError(solrClient.request(new V2Request.Builder("/collections/" + configName + "/config")
-        .withMethod(SolrRequest.METHOD.POST)
-        .withPayload("{" +
+
+    String conf = "{" +
             "  'set-user-property' : {'timePartitionAliasName':'" + alias + "'}," + // no data driven
             "  'set-user-property' : {'update.autoCreateFields':false}," + // no data driven
             "  'add-updateprocessor' : {" +
@@ -95,8 +103,10 @@ public class TimeRoutedAliasUpdateProcessorTest extends SolrCloudTestCase {
             "    'name':'inc', 'class':'" + IncrementURPFactory.class.getName() + "'," +
             "    'fieldName':'" + intField + "'" +
             "  }," +
-            "}").build()));
-    // only sometimes test with "tolerant" URP
+            "}";
+    checkNoError(solrClient.request(new V2Request.Builder("/collections/" + configName + "/config")
+        .withMethod(SolrRequest.METHOD.POST)
+        .withPayload(conf).build()));
+    // only sometimes test with "tolerant" URP
     final String urpNames = "inc" + (random().nextBoolean() ? ",tolerant" : "");
     checkNoError(solrClient.request(new V2Request.Builder("/collections/" + configName + "/config/params")
         .withMethod(SolrRequest.METHOD.POST)
@@ -107,6 +117,11 @@ public class TimeRoutedAliasUpdateProcessorTest extends SolrCloudTestCase {
             "}").build()));
     CollectionAdminRequest.deleteCollection(configName).process(solrClient);
 
+    assertTrue(
+        new ConfigSetAdminRequest.List().process(solrClient).getConfigSets()
+            .contains(configName)
+    );
+
     // start with one collection and an alias for it
     final String col23rd = alias + "_2017-10-23";
     CollectionAdminRequest.createCollection(col23rd, configName, 2, 2)
@@ -114,8 +129,11 @@ public class TimeRoutedAliasUpdateProcessorTest extends SolrCloudTestCase {
         .withProperty(TimeRoutedAliasUpdateProcessor.TIME_PARTITION_ALIAS_NAME_CORE_PROP, alias)
         .process(solrClient);
 
-    assertEquals("We only expect 2 configSets",
-        Arrays.asList("_default", configName), new ConfigSetAdminRequest.List().process(solrClient).getConfigSets());
+    List<String> retrievedConfigSetNames = new ConfigSetAdminRequest.List().process(solrClient).getConfigSets();
+    List<String> expectedConfigSetNames = Arrays.asList("_default", configName);
+    assertEquals("We only expect 2 configSets",
+        expectedConfigSetNames.size(), retrievedConfigSetNames.size());
+    assertTrue("ConfigNames should be: " + expectedConfigSetNames,
+        expectedConfigSetNames.containsAll(retrievedConfigSetNames)
+            && retrievedConfigSetNames.containsAll(expectedConfigSetNames));
 
     CollectionAdminRequest.createAlias(alias, col23rd).process(solrClient);
     //TODO use SOLR-11617 client API to set alias metadata

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183835ed/solr/solr-ref-guide/src/collections-api.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/collections-api.adoc b/solr/solr-ref-guide/src/collections-api.adoc
index a0c8038..8a48ba0 100644
--- a/solr/solr-ref-guide/src/collections-api.adoc
+++ b/solr/solr-ref-guide/src/collections-api.adoc
@@ -85,7 +85,7 @@ A `false` value makes the results of a collection creation predictable and gives
 This parameter is ignored if `createNodeSet` is not also specified.
 
 `collection.configName`::
-Defines the name of the configurations (which *must already be stored in ZooKeeper*) to use for this collection. If not provided, Solr will default to the collection name as the configuration name.
+Defines the name of the configuration (which *must already be stored in ZooKeeper*) to use for this collection. If not provided, Solr will copy the `_default` configset into a new (and mutable) configset named `<collectionName>.AUTOCREATED` and use it for the new collection. When such a collection (one that uses a copy of the `_default` configset) is deleted, the autocreated configset is not deleted by default.
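
A minimal SolrJ sketch of the behavior just described: create a collection without a configset name, then list configsets to see the autocreated one. The cluster address and collection name are illustrative assumptions; the List request is the same one the test above uses.

    import java.util.Collections;
    import java.util.Optional;

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.client.solrj.request.ConfigSetAdminRequest;

    public class AutoCreatedConfigSetExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
          // No collection.configName given, so the _default configs are copied
          // into a new configset named "books.AUTOCREATED" (7.3 behavior).
          CollectionAdminRequest.createCollection("books", 1, 1).process(client);
          // Expect something like [_default, books.AUTOCREATED]
          System.out.println(new ConfigSetAdminRequest.List().process(client).getConfigSets());
        }
      }
    }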
 
 `router.field`::
 If this parameter is specified, the router will look at the value of the field in an input document to compute the hash and identify a shard instead of looking at the `uniqueKey` field. If the field specified is null in the document, the document will be rejected.


[38/41] lucene-solr:jira/solr-11702: SOLR-11871: MoveReplicaSuggester should not suggest leader if other replicas are available

Posted by da...@apache.org.
SOLR-11871: MoveReplicaSuggester should not suggest leader if other replicas are available


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4aeabe7f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4aeabe7f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4aeabe7f

Branch: refs/heads/jira/solr-11702
Commit: 4aeabe7ff25c78867cb10993a60aada1faadd0af
Parents: d0a5dbe
Author: Noble Paul <no...@apache.org>
Authored: Tue Jan 23 00:14:38 2018 +1100
Committer: Noble Paul <no...@apache.org>
Committed: Tue Jan 23 00:14:38 2018 +1100

----------------------------------------------------------------------
 solr/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4aeabe7f/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 7b2f1d3..9d3cbcb 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -162,8 +162,8 @@ Other Changes
 * SOLR-11810: Upgrade Jetty to 9.4.8.v20171121 (Varun Thacker, Erick Erickson)
 
 * SOLR-11747: Pause triggers until actions finish executing and the cool down period expires. (shalin)
-SOLR-11871: MoveReplicaSuggester should not suggest leader if other replicas are available
-*  (noble)
+
+* SOLR-11871: MoveReplicaSuggester should not suggest leader if other replicas are available (noble)
 
 ==================  7.2.1 ==================
 


[02/41] lucene-solr:jira/solr-11702: synchronize changes.txt for 7.2.1 release

Posted by da...@apache.org.
synchronize changes.txt for 7.2.1 release


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b6f65197
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b6f65197
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b6f65197

Branch: refs/heads/jira/solr-11702
Commit: b6f6519749c6fc44e6d214c39cc92ca4561f3715
Parents: 518a3ec
Author: Jim Ferenczi <ji...@apache.org>
Authored: Mon Jan 15 10:34:47 2018 +0100
Committer: Jim Ferenczi <ji...@apache.org>
Committed: Mon Jan 15 10:35:52 2018 +0100

----------------------------------------------------------------------
 lucene/CHANGES.txt |  6 ++++++
 solr/CHANGES.txt   | 16 ++++++++++++++++
 2 files changed, 22 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b6f65197/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index df4cbc4..435a461 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -154,6 +154,12 @@ Other
 * LUCENE-8075: Removed unnecessary null check in IntersectTermsEnum.
   (Pulak Ghosh via Adrien Grand)
 
+======================= Lucene 7.2.1 =======================
+
+Bug Fixes
+
+* LUCENE-8117: Fix advanceExact on SortedNumericDocValues produced by Lucene54DocValues. (Jim Ferenczi).
+
 ======================= Lucene 7.2.0 =======================
 
 API Changes
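
LUCENE-8117 above fixed advanceExact on SortedNumericDocValues produced by the Lucene54 doc-values format. For context, a sketch of the per-document lookup pattern that call supports; the reader, field name, and docId are assumptions for illustration.

    import java.io.IOException;

    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.index.SortedNumericDocValues;

    public class AdvanceExactExample {
      // "price" and docId are illustrative assumptions.
      static void printValues(LeafReader reader, int docId) throws IOException {
        SortedNumericDocValues dv = reader.getSortedNumericDocValues("price");
        if (dv != null && dv.advanceExact(docId)) { // the call fixed by LUCENE-8117
          for (int i = 0; i < dv.docValueCount(); i++) {
            System.out.println(dv.nextValue());
          }
        }
      }
    }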

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b6f65197/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 3d1cda2..8cac79a 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -145,16 +145,32 @@ Other Changes
 
 ==================  7.2.1 ==================
 
+Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
+
+Versions of Major Components
+---------------------
+Apache Tika 1.16
+Carrot2 3.15.0
+Velocity 1.7 and Velocity Tools 2.0
+Apache UIMA 2.3.1
+Apache ZooKeeper 3.4.10
+Jetty 9.3.20.v20170531
+
 Bug Fixes
 ----------------------
 
 * SOLR-11771: Overseer can never process some last messages (Cao Manh Dat)
 
+* SOLR-11783: Rename core in solr standalone mode is not persisted (Erick Erickson)
+
 * SOLR-11809: QueryComponent.prepare rq parsing could fail under SOLR 7.2.0 - fix:
   QueryComponent's rq parameter parsing no longer considers the defType parameter.
   (Christine Poerschke and David Smiley in response to bug report/analysis
   from Dariusz Wojtas and Diego Ceccarelli)
 
+* SOLR-11555: If the query terms reduce to nothing, filter(clause) produces an NPE whereas
+  fq=clause does not (Erick Erickson)
+
 ==================  7.2.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.


[40/41] lucene-solr:jira/solr-11702: Merge branch 'master' into jira/solr-11702

Posted by da...@apache.org.
Merge branch 'master' into jira/solr-11702


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/278442ba
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/278442ba
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/278442ba

Branch: refs/heads/jira/solr-11702
Commit: 278442ba8382408b780c44005afb4267adfc1188
Parents: e8c17c1 183835e
Author: Cao Manh Dat <da...@apache.org>
Authored: Tue Jan 23 15:29:32 2018 +0700
Committer: Cao Manh Dat <da...@apache.org>
Committed: Tue Jan 23 15:29:32 2018 +0700

----------------------------------------------------------------------
 dev-tools/doap/lucene.rdf                       |    7 +
 dev-tools/doap/solr.rdf                         |    7 +
 dev-tools/idea/solr/contrib/langid/langid.iml   |    1 +
 lucene/CHANGES.txt                              |   14 +
 .../compound/hyphenation/HyphenationTree.java   |    4 +-
 .../lucene/analysis/util/UnicodeProps.java      |  116 +-
 .../compound/TestCompoundWordTokenFilter.java   |   15 +
 .../compound/hyphenation-LUCENE-8124.xml        |   61 ++
 .../lucene/analysis/icu/ICUFoldingFilter.java   |   28 +-
 .../analysis/icu/ICUFoldingFilterFactory.java   |   20 +-
 .../icu/TestICUFoldingFilterFactory.java        |   21 +-
 .../index/TestBackwardsCompatibility.java       |    7 +-
 .../org/apache/lucene/index/index.7.2.1-cfs.zip |  Bin 0 -> 15605 bytes
 .../apache/lucene/index/index.7.2.1-nocfs.zip   |  Bin 0 -> 15606 bytes
 .../org/apache/lucene/index/sorted.7.2.1.zip    |  Bin 0 -> 93388 bytes
 .../org/apache/lucene/index/TermStates.java     |    2 +-
 .../org/apache/lucene/index/TestTermStates.java |   36 +
 lucene/ivy-versions.properties                  |    2 +-
 ...jetty-continuation-9.3.20.v20170531.jar.sha1 |    1 -
 .../jetty-continuation-9.4.8.v20171121.jar.sha1 |    1 +
 .../jetty-http-9.3.20.v20170531.jar.sha1        |    1 -
 .../jetty-http-9.4.8.v20171121.jar.sha1         |    1 +
 .../licenses/jetty-io-9.3.20.v20170531.jar.sha1 |    1 -
 .../licenses/jetty-io-9.4.8.v20171121.jar.sha1  |    1 +
 .../jetty-server-9.3.20.v20170531.jar.sha1      |    1 -
 .../jetty-server-9.4.8.v20171121.jar.sha1       |    1 +
 .../jetty-servlet-9.3.20.v20170531.jar.sha1     |    1 -
 .../jetty-servlet-9.4.8.v20171121.jar.sha1      |    1 +
 .../jetty-util-9.3.20.v20170531.jar.sha1        |    1 -
 .../jetty-util-9.4.8.v20171121.jar.sha1         |    1 +
 .../lucene/replicator/ReplicatorTestCase.java   |    4 +-
 solr/CHANGES.txt                                |   50 +-
 solr/contrib/langid/README.txt                  |    3 +-
 solr/contrib/langid/build.xml                   |   60 ++
 solr/contrib/langid/ivy.xml                     |    1 +
 .../LanguageIdentifierUpdateProcessor.java      |   64 ++
 .../OpenNLPLangDetectUpdateProcessor.java       |   80 ++
 ...OpenNLPLangDetectUpdateProcessorFactory.java |  130 +++
 .../TikaLanguageIdentifierUpdateProcessor.java  |   65 --
 .../opennlp-langdetect.eng-swe-spa-rus-deu.bin  |  Bin 0 -> 17702 bytes
 .../conf/solrconfig-languageidentifier.xml      |   25 +-
 .../opennlp.langdetect.trainer.params.txt       |   17 +
 ...dentifierUpdateProcessorFactoryTestCase.java |    6 +-
 ...NLPLangDetectUpdateProcessorFactoryTest.java |   66 ++
 .../client/solrj/embedded/JettySolrRunner.java  |    8 +-
 .../org/apache/solr/cloud/AddReplicaCmd.java    |  279 -----
 .../src/java/org/apache/solr/cloud/Assign.java  |  483 ---------
 .../java/org/apache/solr/cloud/BackupCmd.java   |  225 ----
 .../solr/cloud/CloudConfigSetService.java       |    1 +
 .../java/org/apache/solr/cloud/CloudUtil.java   |    2 +-
 .../org/apache/solr/cloud/CreateAliasCmd.java   |  101 --
 .../apache/solr/cloud/CreateCollectionCmd.java  |  549 ----------
 .../org/apache/solr/cloud/CreateShardCmd.java   |  191 ----
 .../apache/solr/cloud/CreateSnapshotCmd.java    |  179 ---
 .../org/apache/solr/cloud/DeleteAliasCmd.java   |   43 -
 .../apache/solr/cloud/DeleteCollectionCmd.java  |  141 ---
 .../org/apache/solr/cloud/DeleteNodeCmd.java    |  137 ---
 .../org/apache/solr/cloud/DeleteReplicaCmd.java |  281 -----
 .../org/apache/solr/cloud/DeleteShardCmd.java   |  178 ---
 .../apache/solr/cloud/DeleteSnapshotCmd.java    |  160 ---
 .../solr/cloud/ExclusiveSliceProperty.java      |    5 +-
 .../solr/cloud/LeaderRecoveryWatcher.java       |   88 --
 .../java/org/apache/solr/cloud/MigrateCmd.java  |  337 ------
 .../org/apache/solr/cloud/MoveReplicaCmd.java   |  302 ------
 .../java/org/apache/solr/cloud/Overseer.java    |    1 +
 .../OverseerCollectionConfigSetProcessor.java   |    1 +
 .../cloud/OverseerCollectionMessageHandler.java | 1003 -----------------
 .../org/apache/solr/cloud/OverseerRoleCmd.java  |  102 --
 .../apache/solr/cloud/OverseerStatusCmd.java    |  112 --
 .../org/apache/solr/cloud/ReplaceNodeCmd.java   |  226 ----
 .../java/org/apache/solr/cloud/RestoreCmd.java  |  363 -------
 .../cloud/RoutedAliasCreateCollectionCmd.java   |  182 ----
 .../org/apache/solr/cloud/SplitShardCmd.java    |  542 ----------
 .../org/apache/solr/cloud/UtilizeNodeCmd.java   |  120 ---
 .../cloud/api/collections/AddReplicaCmd.java    |  282 +++++
 .../solr/cloud/api/collections/Assign.java      |  483 +++++++++
 .../solr/cloud/api/collections/BackupCmd.java   |  224 ++++
 .../cloud/api/collections/CreateAliasCmd.java   |  100 ++
 .../api/collections/CreateCollectionCmd.java    |  544 ++++++++++
 .../cloud/api/collections/CreateShardCmd.java   |  190 ++++
 .../api/collections/CreateSnapshotCmd.java      |  179 +++
 .../cloud/api/collections/DeleteAliasCmd.java   |   43 +
 .../api/collections/DeleteCollectionCmd.java    |  142 +++
 .../cloud/api/collections/DeleteNodeCmd.java    |  137 +++
 .../cloud/api/collections/DeleteReplicaCmd.java |  280 +++++
 .../cloud/api/collections/DeleteShardCmd.java   |  178 +++
 .../api/collections/DeleteSnapshotCmd.java      |  160 +++
 .../api/collections/LeaderRecoveryWatcher.java  |   88 ++
 .../solr/cloud/api/collections/MigrateCmd.java  |  334 ++++++
 .../cloud/api/collections/MoveReplicaCmd.java   |  303 ++++++
 .../OverseerCollectionMessageHandler.java       | 1011 +++++++++++++++++
 .../cloud/api/collections/OverseerRoleCmd.java  |  102 ++
 .../api/collections/OverseerStatusCmd.java      |  113 ++
 .../cloud/api/collections/ReplaceNodeCmd.java   |  227 ++++
 .../solr/cloud/api/collections/RestoreCmd.java  |  357 ++++++
 .../RoutedAliasCreateCollectionCmd.java         |  184 ++++
 .../cloud/api/collections/SplitShardCmd.java    |  540 ++++++++++
 .../cloud/api/collections/UtilizeNodeCmd.java   |  120 +++
 .../cloud/api/collections/package-info.java     |   23 +
 .../cloud/autoscaling/ScheduledTriggers.java    |   36 +-
 .../cloud/overseer/ClusterStateMutator.java     |    2 +-
 .../solr/cloud/overseer/ReplicaMutator.java     |   12 +-
 .../solr/cloud/overseer/SliceMutator.java       |   13 +-
 .../org/apache/solr/core/CoreContainer.java     |    3 +
 .../org/apache/solr/handler/StreamHandler.java  |   16 +-
 .../solr/handler/admin/CollectionsHandler.java  |   24 +-
 .../handler/admin/ConfigSetsHandlerApi.java     |    6 +
 .../apache/solr/schema/AbstractEnumField.java   |   24 +-
 .../org/apache/solr/schema/DatePointField.java  |    6 -
 .../apache/solr/schema/DoublePointField.java    |    6 -
 .../org/apache/solr/schema/EnumFieldType.java   |   11 +
 .../java/org/apache/solr/schema/FieldType.java  |   80 +-
 .../org/apache/solr/schema/FloatPointField.java |    6 -
 .../org/apache/solr/schema/IntPointField.java   |    6 -
 .../org/apache/solr/schema/LongPointField.java  |    6 -
 .../java/org/apache/solr/schema/NumberType.java |   34 +-
 .../java/org/apache/solr/schema/PointField.java |    6 +
 .../apache/solr/schema/PrimitiveFieldType.java  |    5 +
 .../org/apache/solr/schema/SchemaField.java     |    8 +-
 .../java/org/apache/solr/schema/StrField.java   |   28 +
 .../java/org/apache/solr/schema/TrieField.java  |   39 +-
 .../TimeRoutedAliasUpdateProcessor.java         |    2 +-
 .../solr/collection1/conf/schema11.xml          |   25 +
 .../AbstractCloudBackupRestoreTestCase.java     |  346 ------
 .../test/org/apache/solr/cloud/AssignTest.java  |  155 ---
 .../solr/cloud/BasicDistributedZkTest.java      |    1 +
 .../solr/cloud/ChaosMonkeyShardSplitTest.java   |    5 +
 .../apache/solr/cloud/CollectionReloadTest.java |   84 --
 .../cloud/CollectionTooManyReplicasTest.java    |  221 ----
 .../CollectionsAPIAsyncDistributedZkTest.java   |  177 ---
 .../cloud/CollectionsAPIDistributedZkTest.java  |  684 ------------
 ...ConcurrentDeleteAndCreateCollectionTest.java |  226 ----
 .../apache/solr/cloud/CustomCollectionTest.java |  198 ----
 ...verseerCollectionConfigSetProcessorTest.java |   28 +-
 .../solr/cloud/OverseerTaskQueueTest.java       |    1 +
 .../solr/cloud/ReplicaPropertiesBase.java       |  177 ---
 .../org/apache/solr/cloud/ShardSplitTest.java   | 1015 -----------------
 .../cloud/SimpleCollectionCreateDeleteTest.java |   64 --
 .../apache/solr/cloud/TestCollectionAPI.java    |  797 --------------
 .../TestCollectionsAPIViaSolrCloudCluster.java  |  295 -----
 .../solr/cloud/TestHdfsCloudBackupRestore.java  |  203 ----
 .../cloud/TestLocalFSCloudBackupRestore.java    |   57 -
 .../org/apache/solr/cloud/TestPullReplica.java  |   78 +-
 .../solr/cloud/TestReplicaProperties.java       |  236 ----
 .../cloud/TestRequestStatusCollectionAPI.java   |  197 ----
 .../AbstractCloudBackupRestoreTestCase.java     |  348 ++++++
 .../solr/cloud/api/collections/AssignTest.java  |  156 +++
 .../api/collections/CollectionReloadTest.java   |   85 ++
 .../CollectionTooManyReplicasTest.java          |  222 ++++
 .../CollectionsAPIAsyncDistributedZkTest.java   |  178 +++
 .../CollectionsAPIDistributedZkTest.java        |  686 ++++++++++++
 ...ConcurrentDeleteAndCreateCollectionTest.java |  227 ++++
 .../api/collections/CustomCollectionTest.java   |  199 ++++
 .../HdfsCollectionsAPIDistributedZkTest.java    |  176 +++
 .../api/collections/ReplicaPropertiesBase.java  |  178 +++
 .../cloud/api/collections/ShardSplitTest.java   | 1017 ++++++++++++++++++
 .../SimpleCollectionCreateDeleteTest.java       |   66 ++
 .../api/collections/TestCollectionAPI.java      |  795 ++++++++++++++
 .../TestCollectionsAPIViaSolrCloudCluster.java  |  297 +++++
 .../collections/TestHdfsCloudBackupRestore.java |  207 ++++
 .../TestLocalFSCloudBackupRestore.java          |   57 +
 .../api/collections/TestReplicaProperties.java  |  236 ++++
 .../TestRequestStatusCollectionAPI.java         |  198 ++++
 .../autoscaling/TriggerIntegrationTest.java     |   19 +-
 .../cloud/autoscaling/sim/SimCloudManager.java  |    2 +-
 .../sim/SimClusterStateProvider.java            |   14 +-
 .../cloud/autoscaling/sim/TestLargeCluster.java |   16 +-
 .../autoscaling/sim/TestTriggerIntegration.java |    9 +-
 .../cloud/cdcr/BaseCdcrDistributedZkTest.java   |   11 +-
 .../HdfsCollectionsAPIDistributedZkTest.java    |  176 ---
 .../org/apache/solr/schema/TestPointFields.java |  164 ++-
 .../function/TestMinMaxOnMultiValuedField.java  |  409 ++++++-
 .../TimeRoutedAliasUpdateProcessorTest.java     |   40 +-
 ...jetty-continuation-9.3.20.v20170531.jar.sha1 |    1 -
 .../jetty-continuation-9.4.8.v20171121.jar.sha1 |    1 +
 .../jetty-deploy-9.3.20.v20170531.jar.sha1      |    1 -
 .../jetty-deploy-9.4.8.v20171121.jar.sha1       |    1 +
 .../jetty-http-9.3.20.v20170531.jar.sha1        |    1 -
 .../jetty-http-9.4.8.v20171121.jar.sha1         |    1 +
 .../licenses/jetty-io-9.3.20.v20170531.jar.sha1 |    1 -
 solr/licenses/jetty-io-9.4.8.v20171121.jar.sha1 |    1 +
 .../jetty-jmx-9.3.20.v20170531.jar.sha1         |    1 -
 .../licenses/jetty-jmx-9.4.8.v20171121.jar.sha1 |    1 +
 .../jetty-rewrite-9.3.20.v20170531.jar.sha1     |    1 -
 .../jetty-rewrite-9.4.8.v20171121.jar.sha1      |    1 +
 .../jetty-security-9.3.20.v20170531.jar.sha1    |    1 -
 .../jetty-security-9.4.8.v20171121.jar.sha1     |    1 +
 .../jetty-server-9.3.20.v20170531.jar.sha1      |    1 -
 .../jetty-server-9.4.8.v20171121.jar.sha1       |    1 +
 .../jetty-servlet-9.3.20.v20170531.jar.sha1     |    1 -
 .../jetty-servlet-9.4.8.v20171121.jar.sha1      |    1 +
 .../jetty-servlets-9.3.20.v20170531.jar.sha1    |    1 -
 .../jetty-servlets-9.4.8.v20171121.jar.sha1     |    1 +
 .../jetty-util-9.3.20.v20170531.jar.sha1        |    1 -
 .../jetty-util-9.4.8.v20171121.jar.sha1         |    1 +
 .../jetty-webapp-9.3.20.v20170531.jar.sha1      |    1 -
 .../jetty-webapp-9.4.8.v20171121.jar.sha1       |    1 +
 .../jetty-xml-9.3.20.v20170531.jar.sha1         |    1 -
 .../licenses/jetty-xml-9.4.8.v20171121.jar.sha1 |    1 +
 solr/licenses/start.jar.sha1                    |    2 +-
 solr/server/etc/jetty-http.xml                  |    1 -
 solr/server/etc/jetty-https.xml                 |    1 -
 solr/solr-ref-guide/src/collections-api.adoc    |    2 +-
 .../src/common-query-parameters.adoc            |   22 +-
 .../detecting-languages-during-indexing.adoc    |   29 +-
 .../src/transforming-result-documents.adoc      |    8 +-
 .../DelegatingClusterStateProvider.java         |    7 +
 .../cloud/autoscaling/MoveReplicaSuggester.java |   10 +-
 .../client/solrj/cloud/autoscaling/Policy.java  |    2 +-
 .../solrj/cloud/autoscaling/PolicyHelper.java   |   33 +
 .../solrj/cloud/autoscaling/ReplicaInfo.java    |    7 +
 .../client/solrj/impl/ClusterStateProvider.java |    7 +-
 .../client/solrj/io/eval/ColumnAtEvaluator.java |   55 +
 .../solrj/io/eval/ColumnCountEvaluator.java     |   42 +
 .../solrj/io/eval/FeatureSelectEvaluator.java   |   93 ++
 .../solrj/io/eval/GetAttributesEvaluator.java   |   42 +
 .../solrj/io/eval/GetCentroidsEvaluator.java    |   55 +
 .../solrj/io/eval/GetClusterEvaluator.java      |   64 ++
 .../client/solrj/io/eval/IndexOfEvaluator.java  |   51 +
 .../client/solrj/io/eval/KmeansEvaluator.java   |  135 +++
 .../solr/client/solrj/io/eval/KnnEvaluator.java |  170 +++
 .../solr/client/solrj/io/eval/Matrix.java       |    8 +
 .../client/solrj/io/eval/RowAtEvaluator.java    |   56 +
 .../client/solrj/io/eval/RowCountEvaluator.java |   42 +
 .../solrj/io/eval/SetColumnLabelsEvaluator.java |   47 +
 .../solrj/io/eval/SetRowLabelsEvaluator.java    |   47 +
 .../solrj/io/eval/TermVectorsEvaluator.java     |   13 +-
 .../solrj/io/eval/TopFeaturesEvaluator.java     |  112 ++
 .../client/solrj/io/eval/UnitEvaluator.java     |    5 +-
 .../solr/client/solrj/io/stream/LetStream.java  |   28 +-
 .../apache/solr/common/cloud/ZkNodeProps.java   |   11 +-
 .../solrj/cloud/autoscaling/TestPolicy.java     |  110 ++
 .../client/solrj/embedded/JettyWebappTest.java  |    4 +-
 .../solrj/io/stream/StreamExpressionTest.java   |  268 ++++-
 .../cloud/AbstractFullDistribZkTestBase.java    |   40 +-
 .../apache/solr/cloud/MiniSolrCloudCluster.java |    2 +-
 .../org/apache/solr/util/SSLTestConfig.java     |   11 +-
 .../solr/cloud/MiniSolrCloudClusterTest.java    |    2 +-
 238 files changed, 15000 insertions(+), 12117 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/278442ba/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/278442ba/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
index 0000000,d5ceb6a..2f4a1af
mode 000000,100644..100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
@@@ -1,0 -1,528 +1,544 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.solr.cloud.api.collections;
+ 
+ 
+ import java.io.IOException;
+ import java.lang.invoke.MethodHandles;
+ import java.util.ArrayList;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.LinkedHashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.NoSuchElementException;
+ import java.util.Properties;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicReference;
+ 
+ import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
+ import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
++import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
+ import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
+ import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
+ import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+ import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+ import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
+ import org.apache.solr.cloud.Overseer;
+ import org.apache.solr.cloud.ZkController;
+ import org.apache.solr.cloud.overseer.ClusterStateMutator;
+ import org.apache.solr.common.SolrException;
+ import org.apache.solr.common.SolrException.ErrorCode;
+ import org.apache.solr.common.cloud.ClusterState;
+ import org.apache.solr.common.cloud.DocRouter;
+ import org.apache.solr.common.cloud.ImplicitDocRouter;
+ import org.apache.solr.common.cloud.Replica;
+ import org.apache.solr.common.cloud.ReplicaPosition;
+ import org.apache.solr.common.cloud.ZkConfigManager;
+ import org.apache.solr.common.cloud.ZkNodeProps;
+ import org.apache.solr.common.cloud.ZkStateReader;
+ import org.apache.solr.common.cloud.ZooKeeperException;
+ import org.apache.solr.common.params.CollectionAdminParams;
+ import org.apache.solr.common.params.CommonAdminParams;
+ import org.apache.solr.common.params.CoreAdminParams;
+ import org.apache.solr.common.params.ModifiableSolrParams;
+ import org.apache.solr.common.util.NamedList;
+ import org.apache.solr.common.util.SimpleOrderedMap;
+ import org.apache.solr.common.util.TimeSource;
+ import org.apache.solr.common.util.Utils;
+ import org.apache.solr.handler.admin.ConfigSetsHandlerApi;
+ import org.apache.solr.handler.component.ShardHandler;
+ import org.apache.solr.handler.component.ShardRequest;
+ import org.apache.solr.util.TimeOut;
+ import org.apache.zookeeper.CreateMode;
+ import org.apache.zookeeper.KeeperException;
+ import org.apache.zookeeper.KeeperException.NoNodeException;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+ import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+ import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+ import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
+ import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+ import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
+ import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+ import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+ import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
+ import static org.apache.solr.common.params.CommonParams.NAME;
+ import static org.apache.solr.common.util.StrUtils.formatString;
+ 
+ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
+   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+   private final OverseerCollectionMessageHandler ocmh;
+   private final TimeSource timeSource;
+   private final DistribStateManager stateManager;
+ 
+   public CreateCollectionCmd(OverseerCollectionMessageHandler ocmh) {
+     this.ocmh = ocmh;
+     this.stateManager = ocmh.cloudManager.getDistribStateManager();
+     this.timeSource = ocmh.cloudManager.getTimeSource();
+   }
+ 
+   @Override
+   public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+     final String collectionName = message.getStr(NAME);
+     final boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
+     log.info("Create collection {}", collectionName);
+     if (clusterState.hasCollection(collectionName)) {
+       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "collection already exists: " + collectionName);
+     }
+ 
+     String configName = getConfigName(collectionName, message);
+     if (configName == null) {
+       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No config set found to associate with the collection.");
+     }
+ 
+     ocmh.validateConfigOrThrowSolrException(configName);
+     AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
+ 
+     try {
+ 
+       final String async = message.getStr(ASYNC);
+ 
+       List<String> nodeList = new ArrayList<>();
+       List<String> shardNames = new ArrayList<>();
+       List<ReplicaPosition> replicaPositions = buildReplicaPositions(ocmh.cloudManager, clusterState, message,
+           nodeList, shardNames, sessionWrapper);
+       ZkStateReader zkStateReader = ocmh.zkStateReader;
+       boolean isLegacyCloud = Overseer.isLegacy(zkStateReader);
+ 
+       ocmh.createConfNode(stateManager, configName, collectionName, isLegacyCloud);
+ 
+       Map<String,String> collectionParams = new HashMap<>();
+       Map<String,Object> collectionProps = message.getProperties();
+       for (String propName : collectionProps.keySet()) {
+         if (propName.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
+           collectionParams.put(propName.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), (String) collectionProps.get(propName));
+         }
+       }
+       
+       createCollectionZkNode(stateManager, collectionName, collectionParams);
+       
+       Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
+ 
+       // wait for a while until we don't see the collection
+       TimeOut waitUntil = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+       boolean created = false;
+       while (! waitUntil.hasTimedOut()) {
+         waitUntil.sleep(100);
+         created = ocmh.cloudManager.getClusterStateProvider().getClusterState().hasCollection(collectionName);
+         if(created) break;
+       }
+       if (!created)
+         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not fully create collection: " + collectionName);
+ 
+       if (nodeList.isEmpty()) {
+         log.debug("Finished create command for collection: {}", collectionName);
+         return;
+       }
+ 
+       // For tracking async calls.
+       Map<String, String> requestMap = new HashMap<>();
+ 
+ 
+       log.debug(formatString("Creating SolrCores for new collection {0}, shardNames {1} , message : {2}",
+           collectionName, shardNames, message));
+       Map<String,ShardRequest> coresToCreate = new LinkedHashMap<>();
+       ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+       for (ReplicaPosition replicaPosition : replicaPositions) {
+         String nodeName = replicaPosition.node;
+         String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(),
+             ocmh.cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName),
+             replicaPosition.shard, replicaPosition.type, true);
+         log.debug(formatString("Creating core {0} as part of shard {1} of collection {2} on {3}"
+             , coreName, replicaPosition.shard, collectionName, nodeName));
+ 
+ 
+         String baseUrl = zkStateReader.getBaseUrlForNodeName(nodeName);
+         //in the new mode, create the replica in clusterstate prior to creating the core.
+         // Otherwise the core creation fails
+         if (!isLegacyCloud) {
+           ZkNodeProps props = new ZkNodeProps(
+               Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
+               ZkStateReader.COLLECTION_PROP, collectionName,
+               ZkStateReader.SHARD_ID_PROP, replicaPosition.shard,
+               ZkStateReader.CORE_NAME_PROP, coreName,
+               ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
+               ZkStateReader.BASE_URL_PROP, baseUrl,
+               ZkStateReader.REPLICA_TYPE, replicaPosition.type.name(),
+               CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+           Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
+         }
+ 
+         // Need to create new params for each request
+         ModifiableSolrParams params = new ModifiableSolrParams();
+         params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
+ 
+         params.set(CoreAdminParams.NAME, coreName);
+         params.set(COLL_CONF, configName);
+         params.set(CoreAdminParams.COLLECTION, collectionName);
+         params.set(CoreAdminParams.SHARD, replicaPosition.shard);
+         params.set(ZkStateReader.NUM_SHARDS_PROP, shardNames.size());
+         params.set(CoreAdminParams.NEW_COLLECTION, "true");
+         params.set(CoreAdminParams.REPLICA_TYPE, replicaPosition.type.name());
+ 
+         if (async != null) {
+           String coreAdminAsyncId = async + Math.abs(System.nanoTime());
+           params.add(ASYNC, coreAdminAsyncId);
+           requestMap.put(nodeName, coreAdminAsyncId);
+         }
+         ocmh.addPropertyParams(message, params);
+ 
+         ShardRequest sreq = new ShardRequest();
+         sreq.nodeName = nodeName;
+         params.set("qt", ocmh.adminPath);
+         sreq.purpose = 1;
+         sreq.shards = new String[]{baseUrl};
+         sreq.actualShards = sreq.shards;
+         sreq.params = params;
+ 
+         if (isLegacyCloud) {
+           shardHandler.submit(sreq, sreq.shards[0], sreq.params);
+         } else {
+           coresToCreate.put(coreName, sreq);
+         }
+       }
+ 
+       if(!isLegacyCloud) {
+         // wait for all replica entries to be created
+         Map<String, Replica> replicas = ocmh.waitToSeeReplicasInState(collectionName, coresToCreate.keySet());
+         for (Map.Entry<String, ShardRequest> e : coresToCreate.entrySet()) {
+           ShardRequest sreq = e.getValue();
+           sreq.params.set(CoreAdminParams.CORE_NODE_NAME, replicas.get(e.getKey()).getName());
+           shardHandler.submit(sreq, sreq.shards[0], sreq.params);
+         }
+       }
+ 
+       ocmh.processResponses(results, shardHandler, false, null, async, requestMap, Collections.emptySet());
+       if(results.get("failure") != null && ((SimpleOrderedMap)results.get("failure")).size() > 0) {
+         // Let's cleanup as we hit an exception
+         // We shouldn't be passing 'results' here for the cleanup as the response would then contain 'success'
+         // element, which may be interpreted by the user as a positive ack
+         ocmh.cleanupCollection(collectionName, new NamedList());
+         log.info("Cleaned up artifacts for failed create collection for [{}]", collectionName);
+       } else {
+         log.debug("Finished create command on all shards for collection: {}", collectionName);
+ 
+         // Emit a warning about production use of data driven functionality
+         boolean defaultConfigSetUsed = message.getStr(COLL_CONF) == null ||
+             message.getStr(COLL_CONF).equals(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
+         if (defaultConfigSetUsed) {
+           results.add("warning", "Using _default configset. Data driven schema functionality"
+               + " is enabled by default, which is NOT RECOMMENDED for production use. To turn it off:"
+               + " curl http://{host:port}/solr/" + collectionName + "/config -d '{\"set-user-property\": {\"update.autoCreateFields\":\"false\"}}'");
+         }
+       }
+     } catch (SolrException ex) {
+       throw ex;
+     } catch (Exception ex) {
+       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, ex);
+     } finally {
+       if (sessionWrapper.get() != null) sessionWrapper.get().release();
+     }
+   }
+ 
+   public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
+                                                             ZkNodeProps message,
+                                                             List<String> nodeList, List<String> shardNames,
+                                                             AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
+     final String collectionName = message.getStr(NAME);
+     // look at the replication factor and see if it matches reality
+     // if it does not, find best nodes to create more cores
+     int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
+     int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas>0?0:1));
+     int numPullReplicas = message.getInt(PULL_REPLICAS, 0);
+     AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
+     String policy = message.getStr(Policy.POLICY);
+     boolean usePolicyFramework = !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty() || policy != null;
+ 
+     Integer numSlices = message.getInt(OverseerCollectionMessageHandler.NUM_SLICES, null);
+     String router = message.getStr("router.name", DocRouter.DEFAULT_NAME);
+     if(ImplicitDocRouter.NAME.equals(router)){
+       ClusterStateMutator.getShardNames(shardNames, message.getStr("shards", null));
+       numSlices = shardNames.size();
+     } else {
+       if (numSlices == null ) {
+         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, OverseerCollectionMessageHandler.NUM_SLICES + " is a required param (when using CompositeId router).");
+       }
+       if (numSlices <= 0) {
+         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, OverseerCollectionMessageHandler.NUM_SLICES + " must be > 0");
+       }
+       ClusterStateMutator.getShardNames(numSlices, shardNames);
+     }
+ 
+     int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, 1);
+     if (usePolicyFramework && message.getStr(MAX_SHARDS_PER_NODE) != null && maxShardsPerNode > 0) {
+       throw new SolrException(ErrorCode.BAD_REQUEST, "'maxShardsPerNode>0' is not supported when autoScaling policies are used");
+     }
+     if (maxShardsPerNode == -1 || usePolicyFramework) maxShardsPerNode = Integer.MAX_VALUE;
+     if (numNrtReplicas + numTlogReplicas <= 0) {
+       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
+     }
+ 
+     // we need to look at every node and see how many cores it serves
+     // add our new cores to existing nodes serving the least number of cores
+     // but (for now) require that each core goes on a distinct node.
+ 
+     List<ReplicaPosition> replicaPositions;
+     nodeList.addAll(Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM));
+     if (nodeList.isEmpty()) {
+       log.warn("It is unusual to create a collection ("+collectionName+") without cores.");
+ 
+       replicaPositions = new ArrayList<>();
+     } else {
+       int totalNumReplicas = numNrtReplicas + numTlogReplicas + numPullReplicas;
+       if (totalNumReplicas > nodeList.size()) {
+         log.warn("Specified number of replicas of "
+             + totalNumReplicas
+             + " on collection "
+             + collectionName
+             + " is higher than the number of Solr instances currently live or live and part of your " + OverseerCollectionMessageHandler.CREATE_NODE_SET + "("
+             + nodeList.size()
+             + "). It's unusual to run two replica of the same slice on the same Solr-instance.");
+       }
+ 
+       int maxShardsAllowedToCreate = maxShardsPerNode == Integer.MAX_VALUE ?
+           Integer.MAX_VALUE :
+           maxShardsPerNode * nodeList.size();
+       int requestedShardsToCreate = numSlices * totalNumReplicas;
+       if (maxShardsAllowedToCreate < requestedShardsToCreate) {
+         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName + ". Value of "
+             + MAX_SHARDS_PER_NODE + " is " + maxShardsPerNode
+             + ", and the number of nodes currently live or live and part of your "+OverseerCollectionMessageHandler.CREATE_NODE_SET+" is " + nodeList.size()
+             + ". This allows a maximum of " + maxShardsAllowedToCreate
+             + " to be created. Value of " + OverseerCollectionMessageHandler.NUM_SLICES + " is " + numSlices
+             + ", value of " + NRT_REPLICAS + " is " + numNrtReplicas
+             + ", value of " + TLOG_REPLICAS + " is " + numTlogReplicas
+             + " and value of " + PULL_REPLICAS + " is " + numPullReplicas
+             + ". This requires " + requestedShardsToCreate
+             + " shards to be created (higher than the allowed number)");
+       }
+       replicaPositions = Assign.identifyNodes(cloudManager
+           , clusterState, nodeList, collectionName, message, shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas);
+       sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
+     }
+     return replicaPositions;
+   }
+ 
+   String getConfigName(String coll, ZkNodeProps message) throws KeeperException, InterruptedException {
+     String configName = message.getStr(COLL_CONF);
+ 
+     if (configName == null) {
+       // if there is only one conf, use that
+       List<String> configNames = null;
+       try {
+         configNames = ocmh.zkStateReader.getZkClient().getChildren(ZkConfigManager.CONFIGS_ZKNODE, null, true);
+         if (configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
+           if (CollectionAdminParams.SYSTEM_COLL.equals(coll)) {
+             return coll;
+           } else {
+             String intendedConfigSetName = ConfigSetsHandlerApi.getSuffixedNameForAutoGeneratedConfigSet(coll);
+             copyDefaultConfigSetTo(configNames, intendedConfigSetName);
+             return intendedConfigSetName;
+           }
+         } else if (configNames != null && configNames.size() == 1) {
+           configName = configNames.get(0);
+           // no config set named, but there is only 1 - use it
+           log.info("Only one config set found in zk - using it:" + configName);
+         }
+       } catch (KeeperException.NoNodeException e) {
+ 
+       }
+     }
+     return "".equals(configName)? null: configName;
+   }
+   
+   /**
+    * Copies the _default configset to the specified configset name (overwrites if pre-existing)
+    */
+   private void copyDefaultConfigSetTo(List<String> configNames, String targetConfig) {
+     ZkConfigManager configManager = new ZkConfigManager(ocmh.zkStateReader.getZkClient());
+ 
+     // if a configset named collection exists, re-use it
+     if (configNames.contains(targetConfig)) {
+       log.info("There exists a configset by the same name as the collection we're trying to create: " + targetConfig +
+           ", re-using it.");
+       return;
+     }
+     // Copy _default into targetConfig
+     try {
+       configManager.copyConfigDir(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME, targetConfig, new HashSet<>());
+     } catch (Exception e) {
+       throw new SolrException(ErrorCode.INVALID_STATE, "Error while copying _default to " + targetConfig, e);
+     }
+   }
+ 
+   public static void createCollectionZkNode(DistribStateManager stateManager, String collection, Map<String,String> params) {
+     log.debug("Check for collection zkNode:" + collection);
+     String collectionPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
 -
++    // clean up old terms node
++    String termsPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/terms";
++    try {
++      if (stateManager.hasData(termsPath)) {
++        List<String> paths = stateManager.listData(termsPath);
++        for (String path : paths) {
++          stateManager.removeData(termsPath + "/" + path, -1);
++        }
++        stateManager.removeData(termsPath, -1);
++      }
++    } catch (InterruptedException e) {
++      Thread.interrupted();
++      throw new SolrException(ErrorCode.SERVER_ERROR, "Error deleting old term nodes for collection from Zookeeper", e);
++    } catch (KeeperException | IOException | BadVersionException e) {
++      throw new SolrException(ErrorCode.SERVER_ERROR, "Error deleting old term nodes for collection from Zookeeper", e);
++    }
+     try {
+       if (!stateManager.hasData(collectionPath)) {
+         log.debug("Creating collection in ZooKeeper:" + collection);
+ 
+         try {
+           Map<String,Object> collectionProps = new HashMap<>();
+ 
+           // TODO: if collection.configName isn't set, and there isn't already a conf in zk, just use that?
+           String defaultConfigName = System.getProperty(ZkController.COLLECTION_PARAM_PREFIX + ZkController.CONFIGNAME_PROP, collection);
+ 
+           if (params.size() > 0) {
+             collectionProps.putAll(params);
+             // if the config name wasn't passed in, use the default
+             if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP)) {
+               // users can create the collection node and conf link ahead of time, or this may return another option
+               getConfName(stateManager, collection, collectionPath, collectionProps);
+             }
+ 
+           } else if (System.getProperty("bootstrap_confdir") != null) {
+             // if we are bootstrapping a collection, default the config for
+             // a new collection to the collection we are bootstrapping
+             log.info("Setting config for collection:" + collection + " to " + defaultConfigName);
+ 
+             Properties sysProps = System.getProperties();
+             for (String sprop : System.getProperties().stringPropertyNames()) {
+               if (sprop.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
+                 collectionProps.put(sprop.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), sysProps.getProperty(sprop));
+               }
+             }
+ 
+             // if the config name wasn't passed in, use the default
+             if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP))
+               collectionProps.put(ZkController.CONFIGNAME_PROP, defaultConfigName);
+ 
+           } else if (Boolean.getBoolean("bootstrap_conf")) {
+             // the conf name should be the collection name of this core
+             collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
+           } else {
+             getConfName(stateManager, collection, collectionPath, collectionProps);
+           }
+ 
+           collectionProps.remove(ZkStateReader.NUM_SHARDS_PROP);  // we don't put numShards in the collections properties
+ 
+           ZkNodeProps zkProps = new ZkNodeProps(collectionProps);
+           stateManager.makePath(collectionPath, Utils.toJSON(zkProps), CreateMode.PERSISTENT, false);
+ 
+         } catch (KeeperException e) {
+           // it's okay if the node already exists
+           if (e.code() != KeeperException.Code.NODEEXISTS) {
+             throw e;
+           }
+         } catch (AlreadyExistsException e) {
+           // it's okay if the node already exists
+         }
+       } else {
+         log.debug("Collection zkNode exists");
+       }
+ 
+     } catch (KeeperException e) {
+       // it's okay if another beats us creating the node
+       if (e.code() == KeeperException.Code.NODEEXISTS) {
+         return;
+       }
+       throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
+     } catch (IOException e) {
+       throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
+     } catch (InterruptedException e) {
+       Thread.interrupted();
+       throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
+     }
+ 
+   }
+   
+   private static void getConfName(DistribStateManager stateManager, String collection, String collectionPath, Map<String,Object> collectionProps) throws IOException,
+       KeeperException, InterruptedException {
+     // check for configName
+     log.debug("Looking for collection configName");
+     if (collectionProps.containsKey("configName")) {
+       log.info("configName was passed as a param {}", collectionProps.get("configName"));
+       return;
+     }
+ 
+     List<String> configNames = null;
+     int retry = 1;
+     int retryLimit = 6;
+     for (; retry < retryLimit; retry++) {
+       if (stateManager.hasData(collectionPath)) {
+         VersionedData data = stateManager.getData(collectionPath);
+         ZkNodeProps cProps = ZkNodeProps.load(data.getData());
+         if (cProps.containsKey(ZkController.CONFIGNAME_PROP)) {
+           break;
+         }
+       }
+ 
+       try {
+         configNames = stateManager.listData(ZkConfigManager.CONFIGS_ZKNODE);
+       } catch (NoSuchElementException | NoNodeException e) {
+         // just keep trying
+       }
+ 
+       // check if there's a config set with the same name as the collection
+       if (configNames != null && configNames.contains(collection)) {
+         log.info(
+             "Could not find explicit collection configName, but found config name matching collection name - using that set.");
+         collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
+         break;
+       }
+       // if _default exists, use that
+       if (configNames != null && configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
+         log.info(
+             "Could not find explicit collection configName, but found _default config set - using that set.");
+         collectionProps.put(ZkController.CONFIGNAME_PROP, ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
+         break;
+       }
+       // if there is only one conf, use that
+       if (configNames != null && configNames.size() == 1) {
+         // no config set named, but there is only 1 - use it
+         log.info("Only one config set found in zk - using it:" + configNames.get(0));
+         collectionProps.put(ZkController.CONFIGNAME_PROP, configNames.get(0));
+         break;
+       }
+ 
+       log.info("Could not find collection configName - pausing for 3 seconds and trying again - try: " + retry);
+       Thread.sleep(3000);
+     }
+     if (retry == retryLimit) {
+       log.error("Could not find configName for collection " + collection);
+       throw new ZooKeeperException(
+           SolrException.ErrorCode.SERVER_ERROR,
+           "Could not find configName for collection " + collection + " found:" + configNames);
+     }
+   }
+ }
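
A worked example of the capacity check in buildReplicaPositions above, with illustrative
numbers: on 3 live nodes with maxShardsPerNode=2, numShards=2, nrtReplicas=2, tlogReplicas=1
and pullReplicas=0, the request needs numSlices * totalNumReplicas = 2 * 3 = 6 cores while the
cluster allows maxShardsPerNode * nodeCount = 2 * 3 = 6, so creation proceeds; raising
nrtReplicas to 3 would require 2 * 4 = 8 > 6 cores and trigger the BAD_REQUEST above.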

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/278442ba/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/278442ba/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
----------------------------------------------------------------------


[27/41] lucene-solr:jira/solr-11702: SOLR-11736: Rename knn Streaming Expression to knnSearch and add new knn Stream Evaluator

Posted by da...@apache.org.
SOLR-11736: Rename knn Streaming Expression to knnSearch and add new knn Stream Evaluator


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/5e2ef5eb
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/5e2ef5eb
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/5e2ef5eb

Branch: refs/heads/jira/solr-11702
Commit: 5e2ef5eb73d23cd98af2ebec5cc14730d19c4ca4
Parents: 6781a0d
Author: Joel Bernstein <jb...@apache.org>
Authored: Tue Jan 16 19:19:45 2018 -0500
Committer: Joel Bernstein <jb...@apache.org>
Committed: Tue Jan 16 19:20:39 2018 -0500

----------------------------------------------------------------------
 .../org/apache/solr/handler/StreamHandler.java  |   4 +-
 .../solrj/io/eval/GetAttributesEvaluator.java   |  42 +++++
 .../solr/client/solrj/io/eval/KnnEvaluator.java | 170 +++++++++++++++++++
 .../solrj/io/stream/StreamExpressionTest.java   |  73 +++++++-
 4 files changed, 282 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
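
To make the rename concrete, a minimal sketch of the two functions after this change
(collection and field names are illustrative, drawn from the tests below):

    knnSearch(myCollection, id="1", qf="a_t", rows="4", fl="id, score", mintf="1")

is the more-like-this style search stream formerly named knn, while the new knn evaluator
operates on in-memory matrices, e.g.

    let(a=matrix(array(1,1,1,0,0,0),
                 array(1,0,0,0,1,1),
                 array(0,0,0,1,1,1)),
        b=array(0,0,0,1,1,1),
        c=knn(a, b, 2),
        d=getAttributes(c))

where c is the matrix of the 2 rows of a nearest to b and d exposes the attached
"distances" attribute.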


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5e2ef5eb/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
index 206136c..aa602860 100644
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
@@ -127,7 +127,7 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
         .withFunctionName("topic", TopicStream.class)
         .withFunctionName("commit", CommitStream.class)
         .withFunctionName("random", RandomStream.class)
-        .withFunctionName("knn", KnnStream.class)
+        .withFunctionName("knnSearch", KnnStream.class)
 
         // decorator streams
         .withFunctionName("merge", MergeStream.class)
@@ -305,6 +305,8 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
         .withFunctionName("colAt", ColumnAtEvaluator.class)
         .withFunctionName("setColumnLabels", SetColumnLabelsEvaluator.class)
         .withFunctionName("setRowLabels", SetRowLabelsEvaluator.class)
+        .withFunctionName("knn", KnnEvaluator.class)
+        .withFunctionName("getAttributes", GetAttributesEvaluator.class)
 
         // Boolean Stream Evaluators
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5e2ef5eb/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/GetAttributesEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/GetAttributesEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/GetAttributesEvaluator.java
new file mode 100644
index 0000000..b1c846e
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/GetAttributesEvaluator.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class GetAttributesEvaluator extends RecursiveObjectEvaluator implements OneValueWorker {
+  private static final long serialVersionUID = 1;
+
+  public GetAttributesEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
+    super(expression, factory);
+  }
+
+  @Override
+  public Object doWork(Object value) throws IOException {
+    if(!(value instanceof Attributes)){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - found type %s for value, expecting an Attributes",toExpression(constructingFactory), value.getClass().getSimpleName()));
+    } else {
+      Attributes attributes = (Attributes)value;
+      return attributes.getAttributes();
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5e2ef5eb/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/KnnEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/KnnEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/KnnEvaluator.java
new file mode 100644
index 0000000..665530e
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/KnnEvaluator.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.TreeSet;
+
+import org.apache.commons.math3.ml.distance.CanberraDistance;
+import org.apache.commons.math3.ml.distance.DistanceMeasure;
+import org.apache.commons.math3.ml.distance.EarthMoversDistance;
+import org.apache.commons.math3.ml.distance.EuclideanDistance;
+import org.apache.commons.math3.ml.distance.ManhattanDistance;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionNamedParameter;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class KnnEvaluator extends RecursiveObjectEvaluator implements ManyValueWorker {
+  protected static final long serialVersionUID = 1L;
+
+  private DistanceMeasure distanceMeasure;
+
+  public KnnEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+
+    DistanceEvaluator.DistanceType type = null;
+    List<StreamExpressionNamedParameter> namedParams = factory.getNamedOperands(expression);
+    if(namedParams.size() > 0) {
+      if (namedParams.size() > 1) {
+        throw new IOException("distance function expects only one named parameter 'distance'.");
+      }
+
+      StreamExpressionNamedParameter namedParameter = namedParams.get(0);
+      String name = namedParameter.getName();
+      if (!name.equalsIgnoreCase("distance")) {
+        throw new IOException("distance function expects only one named parameter 'distance'.");
+      }
+
+      String typeParam = namedParameter.getParameter().toString().trim();
+      type= DistanceEvaluator.DistanceType.valueOf(typeParam);
+    } else {
+      type = DistanceEvaluator.DistanceType.euclidean;
+    }
+
+    if (type.equals(DistanceEvaluator.DistanceType.euclidean)) {
+      distanceMeasure = new EuclideanDistance();
+    } else if (type.equals(DistanceEvaluator.DistanceType.manhattan)) {
+      distanceMeasure = new ManhattanDistance();
+    } else if (type.equals(DistanceEvaluator.DistanceType.canberra)) {
+      distanceMeasure = new CanberraDistance();
+    } else if (type.equals(DistanceEvaluator.DistanceType.earthMovers)) {
+      distanceMeasure = new EarthMoversDistance();
+    }
+
+  }
+
+  @Override
+  public Object doWork(Object... values) throws IOException {
+
+    if(values.length < 3) {
+      throw new IOException("knn expects three parameters a Matrix, numeric array and k");
+    }
+
+    Matrix matrix = null;
+    double[] vec = null;
+    int k = 0;
+
+    if(values[0] instanceof Matrix) {
+      matrix = (Matrix)values[0];
+    } else {
+      throw new IOException("The first parameter for knn should be a matrix.");
+    }
+
+    if(values[1] instanceof List) {
+      List<Number> nums = (List<Number>)values[1];
+      vec = new double[nums.size()];
+      for(int i=0; i<nums.size(); i++) {
+        vec[i] = nums.get(i).doubleValue();
+      }
+    } else {
+      throw new IOException("The second parameter for knn should be a numeric array.");
+    }
+
+    if(values[2] instanceof Number) {
+      k = ((Number)values[2]).intValue();
+    } else {
+      throw new IOException("The third parameter for knn should be k.");
+    }
+
+    double[][] data = matrix.getData();
+
+    TreeSet<Neighbor> neighbors = new TreeSet();
+    for(int i=0; i<data.length; i++) {
+      double distance = distanceMeasure.compute(vec, data[i]);
+      neighbors.add(new Neighbor(i, distance));
+      if(neighbors.size() > k) {
+        neighbors.pollLast();
+      }
+    }
+
+    double[][] out = new double[neighbors.size()][];
+    List<String> rowLabels = matrix.getRowLabels();
+    List<String> newRowLabels = new ArrayList();
+    List<Number> distances = new ArrayList();
+    int i=-1;
+
+    while(neighbors.size() > 0) {
+      Neighbor neighbor = neighbors.pollFirst();
+      int rowIndex = neighbor.getRow();
+
+      if(rowLabels != null) {
+        newRowLabels.add(rowLabels.get(rowIndex));
+      }
+
+      out[++i] = data[rowIndex];
+      distances.add(neighbor.getDistance());
+    }
+
+    Matrix knn = new Matrix(out);
+
+    if(rowLabels != null) {
+      knn.setRowLabels(newRowLabels);
+    }
+
+    knn.setColumnLabels(matrix.getColumnLabels());
+    knn.setAttribute("distances", distances);
+    return knn;
+  }
+
+  public static class Neighbor implements Comparable<Neighbor> {
+
+    private Double distance;
+    private int row;
+
+    public Neighbor(int row, double distance) {
+      this.distance = distance;
+      this.row = row;
+    }
+
+    public int getRow() {
+      return this.row;
+    }
+
+    public Double getDistance() {
+      return distance;
+    }
+
+    public int compareTo(Neighbor neighbor) {
+      return this.distance.compareTo(neighbor.getDistance());
+    }
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5e2ef5eb/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index 6f1e61f..1493562 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -933,7 +933,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
   }
 
   @Test
-  public void testKnnStream() throws Exception {
+  public void testKnnSearchStream() throws Exception {
 
     UpdateRequest update = new UpdateRequest();
     update.add(id, "1", "a_t", "hello world have a very nice day blah");
@@ -947,7 +947,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     try {
       context.setSolrClientCache(cache);
       ModifiableSolrParams sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream"));
-      sParams.add("expr", "knn(" + COLLECTIONORALIAS + ", id=\"1\", qf=\"a_t\", rows=\"4\", fl=\"id, score\", mintf=\"1\")");
+      sParams.add("expr", "knnSearch(" + COLLECTIONORALIAS + ", id=\"1\", qf=\"a_t\", rows=\"4\", fl=\"id, score\", mintf=\"1\")");
       JettySolrRunner jetty = cluster.getJettySolrRunner(0);
       SolrStream solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams);
       List<Tuple> tuples = getTuples(solrStream);
@@ -955,26 +955,26 @@ public class StreamExpressionTest extends SolrCloudTestCase {
       assertOrder(tuples, 2, 3, 4);
 
       sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream"));
-      sParams.add("expr", "knn(" + COLLECTIONORALIAS + ", id=\"1\", qf=\"a_t\", k=\"2\", fl=\"id, score\", mintf=\"1\")");
+      sParams.add("expr", "knnSearch(" + COLLECTIONORALIAS + ", id=\"1\", qf=\"a_t\", k=\"2\", fl=\"id, score\", mintf=\"1\")");
       solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams);
       tuples = getTuples(solrStream);
       assertTrue(tuples.size() == 2);
       assertOrder(tuples, 2, 3);
 
       sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream"));
-      sParams.add("expr", "knn(" + COLLECTIONORALIAS + ", id=\"1\", qf=\"a_t\", rows=\"4\", fl=\"id, score\", mintf=\"1\", maxdf=\"0\")");
+      sParams.add("expr", "knnSearch(" + COLLECTIONORALIAS + ", id=\"1\", qf=\"a_t\", rows=\"4\", fl=\"id, score\", mintf=\"1\", maxdf=\"0\")");
       solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams);
       tuples = getTuples(solrStream);
       assertTrue(tuples.size() == 0);
 
       sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream"));
-      sParams.add("expr", "knn(" + COLLECTIONORALIAS + ", id=\"1\", qf=\"a_t\", rows=\"4\", fl=\"id, score\", mintf=\"1\", maxwl=\"1\")");
+      sParams.add("expr", "knnSearch(" + COLLECTIONORALIAS + ", id=\"1\", qf=\"a_t\", rows=\"4\", fl=\"id, score\", mintf=\"1\", maxwl=\"1\")");
       solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams);
       tuples = getTuples(solrStream);
       assertTrue(tuples.size() == 0);
 
       sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream"));
-      sParams.add("expr", "knn(" + COLLECTIONORALIAS + ", id=\"1\", qf=\"a_t\", rows=\"2\", fl=\"id, score\", mintf=\"1\", minwl=\"20\")");
+      sParams.add("expr", "knnSearch(" + COLLECTIONORALIAS + ", id=\"1\", qf=\"a_t\", rows=\"2\", fl=\"id, score\", mintf=\"1\", minwl=\"20\")");
       solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams);
       tuples = getTuples(solrStream);
       assertTrue(tuples.size() == 0);
@@ -7734,6 +7734,67 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     assertEquals(density.doubleValue(), 0.007852638121596995, .00001);
   }
 
+  @Test
+  public void testKnn() throws Exception {
+    String cexpr = "let(echo=true," +
+        "               a=setRowLabels(matrix(array(1,1,1,0,0,0),"+
+        "                                     array(1,0,0,0,1,1),"+
+        "                                     array(0,0,0,1,1,1)), array(row1,row2,row3)),"+
+        "               b=array(0,0,0,1,1,1),"+
+        "               c=knn(a, b, 2),"+
+        "               d=getRowLabels(c),"+
+        "               e=getAttributes(c)," +
+        "               f=knn(a, b, 2, distance=manhattan)," +
+        "               g=getAttributes(f))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+
+    List<List<Number>> knnMatrix = (List<List<Number>>)tuples.get(0).get("c");
+    assertEquals(knnMatrix.size(), 2);
+
+    List<Number> row1 = knnMatrix.get(0);
+    assertEquals(row1.size(), 6);
+    assertEquals(row1.get(0).doubleValue(), 0.0, 0.0);
+    assertEquals(row1.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(row1.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(row1.get(3).doubleValue(), 1.0, 0.0);
+    assertEquals(row1.get(4).doubleValue(), 1.0, 0.0);
+    assertEquals(row1.get(5).doubleValue(), 1.0, 0.0);
+
+    List<Number> row2 = knnMatrix.get(1);
+    assertEquals(row2.size(), 6);
+
+    assertEquals(row2.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(row2.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(row2.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(row2.get(3).doubleValue(), 0.0, 0.0);
+    assertEquals(row2.get(4).doubleValue(), 1.0, 0.0);
+    assertEquals(row2.get(5).doubleValue(), 1.0, 0.0);
+
+    Map atts = (Map)tuples.get(0).get("e");
+    List<Number> dists = (List<Number>)atts.get("distances");
+    assertEquals(dists.size(), 2);
+    assertEquals(dists.get(0).doubleValue(), 0.0, 0.0);
+    assertEquals(dists.get(1).doubleValue(), 1.4142135623730951, 0.0);
+
+    List<String> rowLabels = (List<String>)tuples.get(0).get("d");
+    assertEquals(rowLabels.size(), 2);
+    assertEquals(rowLabels.get(0), "row3");
+    assertEquals(rowLabels.get(1), "row2");
+
+    atts = (Map)tuples.get(0).get("g");
+    dists = (List<Number>)atts.get("distances");
+    assertEquals(dists.size(), 2);
+    assertEquals(dists.get(0).doubleValue(), 0.0, 0.0);
+    assertEquals(dists.get(1).doubleValue(), 2.0, 0.0);
+  }
 
   @Test
   public void testIntegrate() throws Exception {
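
As a cross-check of the distances asserted above: the query vector b=(0,0,0,1,1,1) matches
row3=(0,0,0,1,1,1) exactly (distance 0) and differs from row2=(1,0,0,0,1,1) in two
coordinates, giving a Euclidean distance of sqrt(1^2 + 1^2) = sqrt(2) = 1.4142135623730951...
and a Manhattan distance of |1| + |1| = 2.0, matching the euclidean and manhattan assertions.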


[32/41] lucene-solr:jira/solr-11702: SOLR-11747: Pause triggers until actions finish executing and the cool down period expires

Posted by da...@apache.org.
SOLR-11747: Pause triggers until actions finish executing and the cool down period expires


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/54253534
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/54253534
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/54253534

Branch: refs/heads/jira/solr-11702
Commit: 5425353402641307d71af727ff18c63e4579c5c1
Parents: f491fad
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Thu Jan 18 18:19:24 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Thu Jan 18 18:19:24 2018 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  7 ++++
 .../cloud/autoscaling/ScheduledTriggers.java    | 36 ++++++++++++++++++--
 .../autoscaling/TriggerIntegrationTest.java     | 19 +++--------
 .../cloud/autoscaling/sim/TestLargeCluster.java | 16 +--------
 .../autoscaling/sim/TestTriggerIntegration.java |  9 ++---
 5 files changed, 48 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/54253534/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index a6b2415..7f63679 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -62,6 +62,11 @@ Upgrade Notes
 
 * SOLR-11809: QueryComponent's rq parameter parsing no longer considers the defType parameter.
 
+* SOLR-11747: The behaviour of the autoscaling system has been modified to pause all triggers from execution between
+  the start of actions and end of cool down period. The triggers will be resumed after the cool down period expires.
+  Previously, the cool down period was a fixed period started after actions for a trigger event finish
+  executing. During the cool down period, triggers would continue to run but any events they generated were rejected.
+
 New Features
 ----------------------
 * SOLR-11285: Simulation framework for autoscaling. (ab)
@@ -156,6 +161,8 @@ Other Changes
 
 * SOLR-11810: Upgrade Jetty to 9.4.8.v20171121 (Varun Thacker, Erick Erickson)
 
+* SOLR-11747: Pause triggers until actions finish executing and the cool down period expires. (shalin)
+
 ==================  7.2.1 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

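The cooldown handling described above works by recording a start timestamp when actions finish and rejecting any event that arrives before cooldownStart + cooldownPeriod has elapsed. A minimal self-contained sketch of that gate, using System.nanoTime() as a stand-in for Solr's TimeSource (class and method names here are illustrative, not the actual Solr classes):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class CooldownGate {
  private final long cooldownPeriodNanos = TimeUnit.SECONDS.toNanos(5);
  // Start with an already-expired window so the first event is processed.
  private final AtomicLong cooldownStart =
      new AtomicLong(System.nanoTime() - cooldownPeriodNanos);

  // Returns true if the event may be processed, false if it was rejected.
  boolean tryProcess(String eventId) {
    if (cooldownStart.get() + cooldownPeriodNanos > System.nanoTime()) {
      System.out.println("Cooldown period - rejecting event: " + eventId);
      return false;
    }
    // ... trigger actions would run here ...
    cooldownStart.set(System.nanoTime()); // restart the window after actions finish
    return true;
  }

  public static void main(String[] args) {
    CooldownGate gate = new CooldownGate();
    System.out.println(gate.tryProcess("e1")); // true: window already expired
    System.out.println(gate.tryProcess("e2")); // false: inside the 5s window
  }
}
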
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/54253534/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTriggers.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTriggers.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTriggers.java
index 25ec444..965299c 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTriggers.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ScheduledTriggers.java
@@ -250,7 +250,8 @@ public class ScheduledTriggers implements Closeable {
         // we do not want to lose this event just because the trigger was closed, perhaps a replacement will need it
         return false;
       }
-      // reject events during cooldown period
+      // even though we pause all triggers during action execution, a trigger may already have been
+      // running at the time and may have created an event, so such events are rejected during the cooldown period
       if (cooldownStart.get() + cooldownPeriod.get() > cloudManager.getTimeSource().getTime()) {
         log.debug("-------- Cooldown period - rejecting event: " + event);
         event.getProperties().put(TriggerEvent.COOLDOWN, true);
@@ -260,6 +261,9 @@ public class ScheduledTriggers implements Closeable {
         log.debug("++++++++ Cooldown inactive - processing event: " + event);
       }
       if (hasPendingActions.compareAndSet(false, true)) {
+        // pause all triggers while we execute actions so triggers do not operate on a cluster in transition
+        pauseTriggers();
+
         final boolean enqueued;
         if (replaying) {
           enqueued = false;
@@ -271,7 +275,7 @@ public class ScheduledTriggers implements Closeable {
         List<TriggerAction> actions = source.getActions();
         if (actions != null) {
           if (actionExecutor.isShutdown()) {
-            String msg = String.format(Locale.ROOT, "Ignoring autoscaling event %s because the executor has already been closed", event.toString(), source);
+            String msg = String.format(Locale.ROOT, "Ignoring autoscaling event %s from trigger %s because the executor has already been closed", event.toString(), source);
             listeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.ABORTED, msg);
             log.warn(msg);
             // we do not want to lose this event just because the trigger was closed, perhaps a replacement will need it
@@ -311,6 +315,8 @@ public class ScheduledTriggers implements Closeable {
             } finally {
               cooldownStart.set(cloudManager.getTimeSource().getTime());
               hasPendingActions.set(false);
+              // resume triggers after cool down period
+              resumeTriggers(cloudManager.getTimeSource().convertDelay(TimeUnit.NANOSECONDS, cooldownPeriod.get(), TimeUnit.MILLISECONDS));
             }
             log.debug("-- processing took {} ms for event id={}",
                 TimeUnit.NANOSECONDS.toMillis(cloudManager.getTimeSource().getTime() - eventProcessingStart), event.id);
@@ -325,6 +331,8 @@ public class ScheduledTriggers implements Closeable {
           }
           listeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.SUCCEEDED);
           hasPendingActions.set(false);
+          // resume triggers now
+          resumeTriggers(0);
         }
         return true;
       } else {
@@ -339,6 +347,30 @@ public class ScheduledTriggers implements Closeable {
         TimeUnit.MILLISECONDS);
   }
 
+  /**
+   * Pauses all scheduled trigger invocations without interrupting any that are in progress
+   */
+  private synchronized void pauseTriggers()  {
+    if (log.isDebugEnabled()) {
+      log.debug("Pausing all triggers: {}", scheduledTriggers.keySet());
+    }
+    scheduledTriggers.forEach((s, scheduledTrigger) -> scheduledTrigger.scheduledFuture.cancel(false));
+  }
+
+  /**
+   * Resumes all previously cancelled triggers to be scheduled after the given initial delay
+   * @param afterDelayMillis the initial delay in milliseconds after which triggers should be resumed
+   */
+  private synchronized void resumeTriggers(long afterDelayMillis) {
+    scheduledTriggers.forEach((s, scheduledTrigger) ->  {
+      if (scheduledTrigger.scheduledFuture.isCancelled()) {
+        log.debug("Resuming trigger: {} after {}ms", s, afterDelayMillis);
+        scheduledTrigger.scheduledFuture = scheduledThreadPoolExecutor.scheduleWithFixedDelay(scheduledTrigger, afterDelayMillis,
+            cloudManager.getTimeSource().convertDelay(TimeUnit.SECONDS, triggerDelay.get(), TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
+      }
+    });
+  }
+
   private void waitForPendingTasks(AutoScaling.Trigger newTrigger, List<TriggerAction> actions) throws AlreadyClosedException {
     DistribStateManager stateManager = cloudManager.getDistribStateManager();
     try {

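The pauseTriggers/resumeTriggers pair added above relies on a standard java.util.concurrent pattern: cancel each trigger's ScheduledFuture without interrupting a run already in progress, then re-schedule the same Runnable with an initial delay once the cool down expires. A self-contained sketch of that pattern; the map, names, and delays are illustrative stand-ins, not Solr's classes:

import java.util.Map;
import java.util.concurrent.*;

public class PauseResumeDemo {
  static final ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(2);
  static final Map<String, ScheduledFuture<?>> futures = new ConcurrentHashMap<>();

  static void schedule(String name, Runnable task, long delayMs) {
    futures.put(name, executor.scheduleWithFixedDelay(task, 0, delayMs, TimeUnit.MILLISECONDS));
  }

  static synchronized void pauseAll() {
    // cancel(false): do not interrupt a task that is mid-run
    futures.values().forEach(f -> f.cancel(false));
  }

  static synchronized void resumeAll(Map<String, Runnable> tasks, long afterDelayMs, long delayMs) {
    // re-schedule only the futures that were cancelled by pauseAll()
    futures.replaceAll((name, f) -> f.isCancelled()
        ? executor.scheduleWithFixedDelay(tasks.get(name), afterDelayMs, delayMs, TimeUnit.MILLISECONDS)
        : f);
  }

  public static void main(String[] args) throws InterruptedException {
    Runnable trigger = () -> System.out.println("trigger fired");
    schedule("node_added", trigger, 200);
    Thread.sleep(500);
    pauseAll();                                          // actions executing: no firings
    Thread.sleep(500);
    resumeAll(Map.of("node_added", trigger), 300, 200);  // resume after the cool down
    Thread.sleep(500);
    executor.shutdown();
  }
}
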
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/54253534/solr/core/src/test/org/apache/solr/cloud/autoscaling/TriggerIntegrationTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/TriggerIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/TriggerIntegrationTest.java
index 639f240..3bce457 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/TriggerIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/TriggerIntegrationTest.java
@@ -1194,13 +1194,8 @@ public class TriggerIntegrationTest extends SolrCloudTestCase {
 
     // there must be at least one IGNORED event due to cooldown, and one SUCCEEDED event
     capturedEvents = listenerEvents.get("bar");
-    assertTrue(capturedEvents.toString(), capturedEvents.size() > 1);
-    for (int i = 0; i < capturedEvents.size() - 1; i++) {
-      CapturedEvent ev = capturedEvents.get(i);
-      assertEquals(ev.toString(), TriggerEventProcessorStage.IGNORED, ev.stage);
-      assertTrue(ev.toString(), ev.message.contains("cooldown"));
-    }
-    CapturedEvent ev = capturedEvents.get(capturedEvents.size() - 1);
+    assertEquals(capturedEvents.toString(), 1, capturedEvents.size());
+    CapturedEvent ev = capturedEvents.get(0);
     assertEquals(ev.toString(), TriggerEventProcessorStage.SUCCEEDED, ev.stage);
     // the difference between timestamps of the first SUCCEEDED and the last SUCCEEDED
     // must be larger than cooldown period
@@ -1235,19 +1230,13 @@ public class TriggerIntegrationTest extends SolrCloudTestCase {
     // wait for listener to capture the SUCCEEDED stage
     Thread.sleep(2000);
 
-    // there must be at least one SUCCEEDED (due to newNode3) then for newNode4 one IGNORED
-    // event due to cooldown, and one SUCCEEDED
+    // there must be two SUCCEEDED (due to newNode3 and newNode4) and maybe some ignored events
     capturedEvents = listenerEvents.get("bar");
-    assertTrue(capturedEvents.toString(), capturedEvents.size() > 2);
+    assertTrue(capturedEvents.toString(), capturedEvents.size() >= 2);
     // first event should be SUCCEEDED
     ev = capturedEvents.get(0);
     assertEquals(ev.toString(), TriggerEventProcessorStage.SUCCEEDED, ev.stage);
 
-    for (int i = 1; i < capturedEvents.size() - 1; i++) {
-      ev = capturedEvents.get(i);
-      assertEquals(ev.toString(), TriggerEventProcessorStage.IGNORED, ev.stage);
-      assertTrue(ev.toString(), ev.message.contains("cooldown"));
-    }
     ev = capturedEvents.get(capturedEvents.size() - 1);
     assertEquals(ev.toString(), TriggerEventProcessorStage.SUCCEEDED, ev.stage);
     // the difference between timestamps of the first SUCCEEDED and the last SUCCEEDED

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/54253534/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
index 3adf652..e9c686b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
@@ -254,21 +254,7 @@ public class TestLargeCluster extends SimSolrCloudTestCase {
     }
     assertTrue("no STARTED event", startedEventPos > -1);
     SolrInputDocument startedEvent = systemColl.get(startedEventPos);
-    int ignored = 0;
     int lastIgnoredPos = startedEventPos;
-    for (int i = startedEventPos + 1; i < systemColl.size(); i++) {
-      SolrInputDocument d = systemColl.get(i);
-      if (!"node_added_trigger".equals(d.getFieldValue("event.source_s"))) {
-        continue;
-      }
-      if ("NODEADDED".equals(d.getFieldValue("event.type_s"))) {
-        if ("IGNORED".equals(d.getFieldValue("stage_s"))) {
-          ignored++;
-          lastIgnoredPos = i;
-        }
-      }
-    }
-    assertTrue("no IGNORED events", ignored > 0);
     // make sure some replicas have been moved
     assertTrue("no MOVEREPLICA ops?", cluster.simGetOpCount("MOVEREPLICA") > 0);
 
@@ -306,7 +292,7 @@ public class TestLargeCluster extends SimSolrCloudTestCase {
 
   @Test
   public void testNodeLost() throws Exception {
-    doTestNodeLost(waitForSeconds, 5000, 1);
+    doTestNodeLost(waitForSeconds, 5000, 0);
   }
 
   // Renard R5 series - evenly covers a log10 range

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/54253534/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestTriggerIntegration.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestTriggerIntegration.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestTriggerIntegration.java
index 3a118f2..807d269 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestTriggerIntegration.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestTriggerIntegration.java
@@ -1095,14 +1095,9 @@ public class TestTriggerIntegration extends SimSolrCloudTestCase {
     // wait for listener to capture the SUCCEEDED stage
     cluster.getTimeSource().sleep(2000);
 
-    // there must be at least one IGNORED event due to cooldown, and one SUCCEEDED event
+    // there must be at least one event, and the last one must be SUCCEEDED
     capturedEvents = listenerEvents.get("bar");
-    assertTrue(capturedEvents.toString(), capturedEvents.size() > 1);
-    for (int i = 0; i < capturedEvents.size() - 1; i++) {
-      CapturedEvent ev = capturedEvents.get(i);
-      assertEquals(ev.toString(), TriggerEventProcessorStage.IGNORED, ev.stage);
-      assertTrue(ev.toString(), ev.message.contains("cooldown"));
-    }
+    assertTrue(capturedEvents.toString(), capturedEvents.size() >= 1);
     CapturedEvent ev = capturedEvents.get(capturedEvents.size() - 1);
     assertEquals(ev.toString(), TriggerEventProcessorStage.SUCCEEDED, ev.stage);
     // the difference between timestamps of the first SUCCEEDED and the last SUCCEEDED


[20/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to their own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/RoutedAliasCreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/RoutedAliasCreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/RoutedAliasCreateCollectionCmd.java
deleted file mode 100644
index 607588c..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/RoutedAliasCreateCollectionCmd.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Aliases;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.handler.admin.CollectionsHandler;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor;
-import org.apache.solr.util.TimeZoneUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor.ROUTER_FIELD_METADATA;
-import static org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor.ROUTER_INTERVAL_METADATA;
-
-/**
- * For "routed aliases", creates another collection and adds it to the alias. In some cases it will not
- * add a new collection.
- * If a collection is created, then collection creation info is returned.
- *
- * Note: this logic is within an Overseer because we want to leverage the mutual exclusion
- * property afforded by the lock it obtains on the alias name.
- * @since 7.3
- */
-public class RoutedAliasCreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String IF_MOST_RECENT_COLL_NAME = "ifMostRecentCollName";
-
-  public static final String COLL_METAPREFIX = "collection-create.";
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public RoutedAliasCreateCollectionCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  /* TODO:
-  There are a few classes related to time routed alias processing.  We need to share some logic better.
-   */
-
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    //---- PARSE PRIMARY MESSAGE PARAMS
-    // important that we use NAME for the alias as that is what the Overseer will get a lock on before calling us
-    final String aliasName = message.getStr(NAME);
-    // the client believes this is the mostRecent collection name.  We assert this if provided.
-    final String ifMostRecentCollName = message.getStr(IF_MOST_RECENT_COLL_NAME); // optional
-
-    // TODO collection param (or intervalDateMath override?), useful for data capped collections
-
-    //---- PARSE ALIAS INFO FROM ZK
-    final ZkStateReader.AliasesManager aliasesHolder = ocmh.zkStateReader.aliasesHolder;
-    final Aliases aliases = aliasesHolder.getAliases();
-    final Map<String, String> aliasMetadata = aliases.getCollectionAliasMetadata(aliasName);
-    if (aliasMetadata == null) {
-      throw newAliasMustExistException(aliasName); // if it did exist, we'd have a non-null map
-    }
-
-    String routeField = aliasMetadata.get(ROUTER_FIELD_METADATA);
-    if (routeField == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "This command only works on time routed aliases.  Expected alias metadata not found.");
-    }
-    String intervalDateMath = aliasMetadata.getOrDefault(ROUTER_INTERVAL_METADATA, "+1DAY");
-    TimeZone intervalTimeZone = TimeZoneUtils.parseTimezone(aliasMetadata.get(CommonParams.TZ));
-
-    //TODO this is ugly; how can we organize the code related to this feature better?
-    final List<Map.Entry<Instant, String>> parsedCollections =
-        TimeRoutedAliasUpdateProcessor.parseCollections(aliasName, aliases, () -> newAliasMustExistException(aliasName));
-
-    //---- GET MOST RECENT COLL
-    final Map.Entry<Instant, String> mostRecentEntry = parsedCollections.get(0);
-    final Instant mostRecentCollTimestamp = mostRecentEntry.getKey();
-    final String mostRecentCollName = mostRecentEntry.getValue();
-    if (ifMostRecentCollName != null) {
-      if (!mostRecentCollName.equals(ifMostRecentCollName)) {
-        // Possibly due to race conditions in URPs on multiple leaders calling us at the same time
-        String msg = IF_MOST_RECENT_COLL_NAME + " expected " + ifMostRecentCollName + " but it's " + mostRecentCollName;
-        if (parsedCollections.stream().map(Map.Entry::getValue).noneMatch(ifMostRecentCollName::equals)) {
-          msg += ". Furthermore this collection isn't in the list of collections referenced by the alias.";
-        }
-        log.info(msg);
-        results.add("message", msg);
-        return;
-      }
-    } else if (mostRecentCollTimestamp.isAfter(Instant.now())) {
-      final String msg = "Most recent collection is in the future, so we won't create another.";
-      log.info(msg);
-      results.add("message", msg);
-      return;
-    }
-
-    //---- COMPUTE NEXT COLLECTION NAME
-    final Instant nextCollTimestamp = TimeRoutedAliasUpdateProcessor.computeNextCollTimestamp(mostRecentCollTimestamp, intervalDateMath, intervalTimeZone);
-    assert nextCollTimestamp.isAfter(mostRecentCollTimestamp);
-    final String createCollName = TimeRoutedAliasUpdateProcessor.formatCollectionNameFromInstant(aliasName, nextCollTimestamp);
-
-    //---- CREATE THE COLLECTION
-    // Map alias metadata starting with a prefix to a create-collection API request
-    final ModifiableSolrParams createReqParams = new ModifiableSolrParams();
-    for (Map.Entry<String, String> e : aliasMetadata.entrySet()) {
-      if (e.getKey().startsWith(COLL_METAPREFIX)) {
-        createReqParams.set(e.getKey().substring(COLL_METAPREFIX.length()), e.getValue());
-      }
-    }
-    if (createReqParams.get(COLL_CONF) == null) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "We require an explicit " + COLL_CONF );
-    }
-    createReqParams.set(NAME, createCollName);
-    createReqParams.set("property." + TimeRoutedAliasUpdateProcessor.TIME_PARTITION_ALIAS_NAME_CORE_PROP, aliasName);
-    // a CollectionOperation reads params and produces a message (Map) that is supposed to be sent to the Overseer.
-    //   Although we could create the Map without it, there are a fair amount of rules we don't want to reproduce.
-    final Map<String, Object> createMsgMap = CollectionsHandler.CollectionOperation.CREATE_OP.execute(
-        new LocalSolrQueryRequest(null, createReqParams),
-        null,
-        ocmh.overseer.getCoreContainer().getCollectionsHandler());
-    createMsgMap.put(Overseer.QUEUE_OPERATION, "create");
-    // Since we are running in the Overseer here, send the message directly to the Overseer CreateCollectionCmd
-    ocmh.commandMap.get(CollectionParams.CollectionAction.CREATE).call(clusterState, new ZkNodeProps(createMsgMap), results);
-
-    CollectionsHandler.waitForActiveCollection(createCollName, null, ocmh.overseer.getCoreContainer(), new OverseerSolrResponse(results));
-
-    //TODO delete some of the oldest collection(s) ?
-
-    //---- UPDATE THE ALIAS
-    aliasesHolder.applyModificationAndExportToZk(curAliases -> {
-      final List<String> curTargetCollections = curAliases.getCollectionAliasListMap().get(aliasName);
-      if (curTargetCollections.contains(createCollName)) {
-        return curAliases;
-      } else {
-        List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size() + 1);
-        // prepend it on purpose (thus reverse sorted). Solr alias resolution defaults to the first collection in a list
-        newTargetCollections.add(createCollName);
-        newTargetCollections.addAll(curTargetCollections);
-        return curAliases.cloneWithCollectionAlias(aliasName, StrUtils.join(newTargetCollections, ','));
-      }
-    });
-
-  }
-
-  private SolrException newAliasMustExistException(String aliasName) {
-    return new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-        "Alias " + aliasName + " does not exist.");
-  }
-
-}

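The core of the deleted command above (now moved under org.apache.solr.cloud.api.collections) is computing the next collection's timestamp from the most recent one and formatting a new collection name from it. A rough stand-alone sketch of that step; the real code evaluates date-math strings such as "+1DAY" via TimeRoutedAliasUpdateProcessor, so the fixed one-day increment and the name format below are assumptions for illustration only:

import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;

public class NextCollectionSketch {
  static final DateTimeFormatter FMT =
      DateTimeFormatter.ofPattern("yyyy-MM-dd").withZone(ZoneOffset.UTC);

  static String nextCollectionName(String aliasName, Instant mostRecent) {
    // stand-in for evaluating the "+1DAY" interval date math
    Instant next = mostRecent.plus(1, ChronoUnit.DAYS);
    assert next.isAfter(mostRecent); // mirrors the assertion in the command
    return aliasName + "_" + FMT.format(next);
  }

  public static void main(String[] args) {
    Instant mostRecent = Instant.parse("2018-01-17T00:00:00Z");
    // prints e.g. "timeseries_2018-01-18" (hypothetical naming scheme)
    System.out.println(nextCollectionName("timeseries", mostRecent));
  }
}
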
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
deleted file mode 100644
index 9732616..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
+++ /dev/null
@@ -1,542 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CompositeIdRouter;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.PlainIdRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.util.TestInjection;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-
-public class SplitShardCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public SplitShardCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    split(state, message, results);
-  }
-
-  public boolean split(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
-    String collectionName = message.getStr(CoreAdminParams.COLLECTION);
-
-    log.info("Split shard invoked");
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    zkStateReader.forceUpdateCollection(collectionName);
-    AtomicReference<String> slice = new AtomicReference<>();
-    slice.set(message.getStr(ZkStateReader.SHARD_ID_PROP));
-
-    String splitKey = message.getStr("split.key");
-    DocCollection collection = clusterState.getCollection(collectionName);
-
-    PolicyHelper.SessionWrapper sessionWrapper = null;
-
-    Slice parentSlice = getParentSlice(clusterState, collectionName, slice, splitKey);
-
-    // find the leader for the shard
-    Replica parentShardLeader = null;
-    try {
-      parentShardLeader = zkStateReader.getLeaderRetry(collectionName, slice.get(), 10000);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-    }
-
-    // let's record the ephemeralOwner of the parent leader node
-    Stat leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
-    if (leaderZnodeStat == null)  {
-      // we just got to know the leader but its live node is gone already!
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
-    }
-
-    List<DocRouter.Range> subRanges = new ArrayList<>();
-    List<String> subSlices = new ArrayList<>();
-    List<String> subShardNames = new ArrayList<>();
-
-    String rangesStr = fillRanges(ocmh.cloudManager, message, collection, parentSlice, subRanges, subSlices, subShardNames);
-
-    try {
-
-      boolean oldShardsDeleted = false;
-      for (String subSlice : subSlices) {
-        Slice oSlice = collection.getSlice(subSlice);
-        if (oSlice != null) {
-          final Slice.State state = oSlice.getState();
-          if (state == Slice.State.ACTIVE) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                "Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
-          } else if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
-            // delete the shards
-            log.info("Sub-shard: {} already exists therefore requesting its deletion", subSlice);
-            Map<String, Object> propMap = new HashMap<>();
-            propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
-            propMap.put(COLLECTION_PROP, collectionName);
-            propMap.put(SHARD_ID_PROP, subSlice);
-            ZkNodeProps m = new ZkNodeProps(propMap);
-            try {
-              ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
-            } catch (Exception e) {
-              throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
-                  e);
-            }
-
-            oldShardsDeleted = true;
-          }
-        }
-      }
-
-      if (oldShardsDeleted) {
-        // refresh the locally cached cluster state
-        // we know we have the latest because otherwise deleteshard would have failed
-        clusterState = zkStateReader.getClusterState();
-        collection = clusterState.getCollection(collectionName);
-      }
-
-      final String asyncId = message.getStr(ASYNC);
-      Map<String, String> requestMap = new HashMap<>();
-      String nodeName = parentShardLeader.getNodeName();
-
-      for (int i = 0; i < subRanges.size(); i++) {
-        String subSlice = subSlices.get(i);
-        String subShardName = subShardNames.get(i);
-        DocRouter.Range subRange = subRanges.get(i);
-
-        log.info("Creating slice " + subSlice + " of collection " + collectionName + " on " + nodeName);
-
-        Map<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD.toLower());
-        propMap.put(ZkStateReader.SHARD_ID_PROP, subSlice);
-        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-        propMap.put(ZkStateReader.SHARD_RANGE_PROP, subRange.toString());
-        propMap.put(ZkStateReader.SHARD_STATE_PROP, Slice.State.CONSTRUCTION.toString());
-        propMap.put(ZkStateReader.SHARD_PARENT_PROP, parentSlice.getName());
-        propMap.put("shard_parent_node", parentShardLeader.getNodeName());
-        propMap.put("shard_parent_zk_session", leaderZnodeStat.getEphemeralOwner());
-        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-        inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
-
-        // wait until we are able to see the new shard in cluster state
-        ocmh.waitForNewShard(collectionName, subSlice);
-
-        // refresh cluster state
-        clusterState = zkStateReader.getClusterState();
-
-        log.info("Adding replica " + subShardName + " as part of slice " + subSlice + " of collection " + collectionName
-            + " on " + nodeName);
-        propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
-        propMap.put(COLLECTION_PROP, collectionName);
-        propMap.put(SHARD_ID_PROP, subSlice);
-        propMap.put("node", nodeName);
-        propMap.put(CoreAdminParams.NAME, subShardName);
-        propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-        // copy over property params:
-        for (String key : message.keySet()) {
-          if (key.startsWith(COLL_PROP_PREFIX)) {
-            propMap.put(key, message.getStr(key));
-          }
-        }
-        // add async param
-        if (asyncId != null) {
-          propMap.put(ASYNC, asyncId);
-        }
-        ocmh.addReplica(clusterState, new ZkNodeProps(propMap), results, null);
-      }
-
-      ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard leaders", asyncId, requestMap);
-
-      for (String subShardName : subShardNames) {
-        // wait for parent leader to acknowledge the sub-shard core
-        log.info("Asking parent leader to wait for: " + subShardName + " to be alive on: " + nodeName);
-        String coreNodeName = ocmh.waitForCoreNodeName(collectionName, nodeName, subShardName);
-        CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
-        cmd.setCoreName(subShardName);
-        cmd.setNodeName(nodeName);
-        cmd.setCoreNodeName(coreNodeName);
-        cmd.setState(Replica.State.ACTIVE);
-        cmd.setCheckLive(true);
-        cmd.setOnlyIfLeader(true);
-
-        ModifiableSolrParams p = new ModifiableSolrParams(cmd.getParams());
-        ocmh.sendShardRequest(nodeName, p, shardHandler, asyncId, requestMap);
-      }
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD timed out waiting for subshard leaders to come up",
-          asyncId, requestMap);
-
-      log.info("Successfully created all sub-shards for collection " + collectionName + " parent shard: " + slice
-          + " on: " + parentShardLeader);
-
-      log.info("Splitting shard " + parentShardLeader.getName() + " as part of slice " + slice + " of collection "
-          + collectionName + " on " + parentShardLeader);
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
-      params.set(CoreAdminParams.CORE, parentShardLeader.getStr("core"));
-      for (int i = 0; i < subShardNames.size(); i++) {
-        String subShardName = subShardNames.get(i);
-        params.add(CoreAdminParams.TARGET_CORE, subShardName);
-      }
-      params.set(CoreAdminParams.RANGES, rangesStr);
-
-      ocmh.sendShardRequest(parentShardLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to invoke SPLIT core admin command", asyncId,
-          requestMap);
-
-      log.info("Index on shard: " + nodeName + " split into two successfully");
-
-      // apply buffered updates on sub-shards
-      for (int i = 0; i < subShardNames.size(); i++) {
-        String subShardName = subShardNames.get(i);
-
-        log.info("Applying buffered updates on : " + subShardName);
-
-        params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
-        params.set(CoreAdminParams.NAME, subShardName);
-
-        ocmh.sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap);
-      }
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed while asking sub shard leaders" +
-          " to apply buffered updates", asyncId, requestMap);
-
-      log.info("Successfully applied buffered updates on : " + subShardNames);
-
-      // Replica creation for the new Slices
-
-      // look at the replication factor and see if it matches reality
-      // if it does not, find best nodes to create more cores
-
-      // TODO: Have replication factor decided in some other way instead of numShards for the parent
-
-      int repFactor = parentSlice.getReplicas().size();
-
-      // we need to look at every node and see how many cores it serves
-      // add our new cores to existing nodes serving the least number of cores
-      // but (for now) require that each core goes on a distinct node.
-
-      // TODO: add smarter options that look at the current number of cores per
-      // node?
-      // for now we just go random
-      Set<String> nodes = clusterState.getLiveNodes();
-      List<String> nodeList = new ArrayList<>(nodes.size());
-      nodeList.addAll(nodes);
-
-      // TODO: Have maxShardsPerNode param for this operation?
-
-      // Remove the node that hosts the parent shard for replica creation.
-      nodeList.remove(nodeName);
-
-      // TODO: change this to handle sharding a slice into > 2 sub-shards.
-
-      List<ReplicaPosition> replicaPositions = Assign.identifyNodes(ocmh.cloudManager,
-          clusterState,
-          new ArrayList<>(clusterState.getLiveNodes()),
-          collectionName,
-          new ZkNodeProps(collection.getProperties()),
-          subSlices, repFactor - 1, 0, 0);
-      sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
-
-      List<Map<String, Object>> replicas = new ArrayList<>((repFactor - 1) * 2);
-
-      for (ReplicaPosition replicaPosition : replicaPositions) {
-        String sliceName = replicaPosition.shard;
-        String subShardNodeName = replicaPosition.node;
-        String solrCoreName = collectionName + "_" + sliceName + "_replica" + (replicaPosition.index);
-
-        log.info("Creating replica shard " + solrCoreName + " as part of slice " + sliceName + " of collection "
-            + collectionName + " on " + subShardNodeName);
-
-        ZkNodeProps props = new ZkNodeProps(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
-            ZkStateReader.COLLECTION_PROP, collectionName,
-            ZkStateReader.SHARD_ID_PROP, sliceName,
-            ZkStateReader.CORE_NAME_PROP, solrCoreName,
-            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(subShardNodeName),
-            ZkStateReader.NODE_NAME_PROP, subShardNodeName,
-            CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-        Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
-
-        HashMap<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
-        propMap.put(COLLECTION_PROP, collectionName);
-        propMap.put(SHARD_ID_PROP, sliceName);
-        propMap.put("node", subShardNodeName);
-        propMap.put(CoreAdminParams.NAME, solrCoreName);
-        // copy over property params:
-        for (String key : message.keySet()) {
-          if (key.startsWith(COLL_PROP_PREFIX)) {
-            propMap.put(key, message.getStr(key));
-          }
-        }
-        // add async param
-        if (asyncId != null) {
-          propMap.put(ASYNC, asyncId);
-        }
-        // special flag param to instruct addReplica not to create the replica in cluster state again
-        propMap.put(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, "true");
-
-        propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-
-        replicas.add(propMap);
-      }
-
-      assert TestInjection.injectSplitFailureBeforeReplicaCreation();
-
-      long ephemeralOwner = leaderZnodeStat.getEphemeralOwner();
-      // compare against the ephemeralOwner of the parent leader node
-      leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
-      if (leaderZnodeStat == null || ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
-        // put sub-shards in recovery_failed state
-        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-        Map<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-        for (String subSlice : subSlices) {
-          propMap.put(subSlice, Slice.State.RECOVERY_FAILED.toString());
-        }
-        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-        ZkNodeProps m = new ZkNodeProps(propMap);
-        inQueue.offer(Utils.toJSON(m));
-
-        if (leaderZnodeStat == null)  {
-          // the leader is not live anymore, fail the split!
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
-        } else if (ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
-          // there's a new leader, fail the split!
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "The zk session id for the shard leader node: " + parentShardLeader.getNodeName() + " has changed from "
-                  + ephemeralOwner + " to " + leaderZnodeStat.getEphemeralOwner() + ". This can cause data loss so we must abort the split");
-        }
-      }
-
-      // we must set the slice state into recovery before actually creating the replica cores
-      // this ensures that the logic inside Overseer to update sub-shard state to 'active'
-      // always gets a chance to execute. See SOLR-7673
-
-      if (repFactor == 1) {
-        // switch sub shard states to 'active'
-        log.info("Replication factor is 1 so switching shard states");
-        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-        Map<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-        propMap.put(slice.get(), Slice.State.INACTIVE.toString());
-        for (String subSlice : subSlices) {
-          propMap.put(subSlice, Slice.State.ACTIVE.toString());
-        }
-        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-        ZkNodeProps m = new ZkNodeProps(propMap);
-        inQueue.offer(Utils.toJSON(m));
-      } else {
-        log.info("Requesting shard state be set to 'recovery'");
-        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-        Map<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-        for (String subSlice : subSlices) {
-          propMap.put(subSlice, Slice.State.RECOVERY.toString());
-        }
-        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-        ZkNodeProps m = new ZkNodeProps(propMap);
-        inQueue.offer(Utils.toJSON(m));
-      }
-
-      // now actually create replica cores on sub shard nodes
-      for (Map<String, Object> replica : replicas) {
-        ocmh.addReplica(clusterState, new ZkNodeProps(replica), results, null);
-      }
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard replicas", asyncId, requestMap);
-
-      log.info("Successfully created all replica shards for all sub-slices " + subSlices);
-
-      ocmh.commit(results, slice.get(), parentShardLeader);
-
-      return true;
-    } catch (SolrException e) {
-      throw e;
-    } catch (Exception e) {
-      log.error("Error executing split operation for collection: " + collectionName + " parent shard: " + slice, e);
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
-    } finally {
-      if (sessionWrapper != null) sessionWrapper.release();
-    }
-  }
-
-  public static Slice getParentSlice(ClusterState clusterState, String collectionName, AtomicReference<String> slice, String splitKey) {
-    DocCollection collection = clusterState.getCollection(collectionName);
-    DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
-
-    Slice parentSlice;
-
-    if (slice.get() == null) {
-      if (router instanceof CompositeIdRouter) {
-        Collection<Slice> searchSlices = router.getSearchSlicesSingle(splitKey, new ModifiableSolrParams(), collection);
-        if (searchSlices.isEmpty()) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to find an active shard for split.key: " + splitKey);
-        }
-        if (searchSlices.size() > 1) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Splitting a split.key: " + splitKey + " which spans multiple shards is not supported");
-        }
-        parentSlice = searchSlices.iterator().next();
-        slice.set(parentSlice.getName());
-        log.info("Split by route.key: {}, parent shard is: {} ", splitKey, slice);
-      } else {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Split by route key can only be used with CompositeIdRouter or subclass. Found router: "
-                + router.getClass().getName());
-      }
-    } else {
-      parentSlice = collection.getSlice(slice.get());
-    }
-
-    if (parentSlice == null) {
-      // no chance of the collection being null because ClusterState#getCollection(String) would have thrown
-      // an exception already
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No shard with the specified name exists: " + slice);
-    }
-    return parentSlice;
-  }
-
-  public static String fillRanges(SolrCloudManager cloudManager, ZkNodeProps message, DocCollection collection, Slice parentSlice,
-                                List<DocRouter.Range> subRanges, List<String> subSlices, List<String> subShardNames) {
-    String splitKey = message.getStr("split.key");
-    DocRouter.Range range = parentSlice.getRange();
-    if (range == null) {
-      range = new PlainIdRouter().fullRange();
-    }
-    DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
-
-    String rangesStr = message.getStr(CoreAdminParams.RANGES);
-    if (rangesStr != null) {
-      String[] ranges = rangesStr.split(",");
-      if (ranges.length == 0 || ranges.length == 1) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least two ranges specified to split a shard");
-      } else {
-        for (int i = 0; i < ranges.length; i++) {
-          String r = ranges[i];
-          try {
-            subRanges.add(DocRouter.DEFAULT.fromString(r));
-          } catch (Exception e) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception in parsing hexadecimal hash range: " + r, e);
-          }
-          if (!subRanges.get(i).isSubsetOf(range)) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                "Specified hash range: " + r + " is not a subset of parent shard's range: " + range.toString());
-          }
-        }
-        List<DocRouter.Range> temp = new ArrayList<>(subRanges); // copy to preserve original order
-        Collections.sort(temp);
-        if (!range.equals(new DocRouter.Range(temp.get(0).min, temp.get(temp.size() - 1).max))) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Specified hash ranges: " + rangesStr + " do not cover the entire range of parent shard: " + range);
-        }
-        for (int i = 1; i < temp.size(); i++) {
-          if (temp.get(i - 1).max + 1 != temp.get(i).min) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specified hash ranges: " + rangesStr
-                + " either overlap with each other or " + "do not cover the entire range of parent shard: " + range);
-          }
-        }
-      }
-    } else if (splitKey != null) {
-      if (router instanceof CompositeIdRouter) {
-        CompositeIdRouter compositeIdRouter = (CompositeIdRouter) router;
-        List<DocRouter.Range> tmpSubRanges = compositeIdRouter.partitionRangeByKey(splitKey, range);
-        if (tmpSubRanges.size() == 1) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey
-              + " has a hash range that is exactly equal to hash range of shard: " + parentSlice.getName());
-        }
-        for (DocRouter.Range subRange : tmpSubRanges) {
-          if (subRange.min == subRange.max) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey + " must be a compositeId");
-          }
-        }
-        subRanges.addAll(tmpSubRanges);
-        log.info("Partitioning parent shard " + parentSlice.getName() + " range: " + parentSlice.getRange() + " yields: " + subRanges);
-        rangesStr = "";
-        for (int i = 0; i < subRanges.size(); i++) {
-          DocRouter.Range subRange = subRanges.get(i);
-          rangesStr += subRange.toString();
-          if (i < subRanges.size() - 1) rangesStr += ',';
-        }
-      }
-    } else {
-      // todo: fixed to two partitions?
-      subRanges.addAll(router.partitionRange(2, range));
-    }
-
-    for (int i = 0; i < subRanges.size(); i++) {
-      String subSlice = parentSlice.getName() + "_" + i;
-      subSlices.add(subSlice);
-      String subShardName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), collection, subSlice, Replica.Type.NRT);
-      subShardNames.add(subShardName);
-    }
-    return rangesStr;
-  }
-}

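The fillRanges logic above enforces two invariants on user-supplied hash ranges: once sorted, the sub-ranges must be contiguous (each min is the previous max + 1), and together they must cover the parent range exactly. A minimal sketch of those checks with a stand-in Range type (Solr's real type is DocRouter.Range):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class RangeValidation {
  record Range(int min, int max) {}

  static void validate(Range parent, List<Range> subRanges) {
    List<Range> sorted = new ArrayList<>(subRanges); // copy to preserve caller's order
    sorted.sort(Comparator.comparingInt(Range::min));
    // coverage: sorted ranges must span exactly [parent.min, parent.max]
    if (sorted.get(0).min() != parent.min()
        || sorted.get(sorted.size() - 1).max() != parent.max()) {
      throw new IllegalArgumentException("ranges do not cover the parent range");
    }
    // contiguity: no overlaps and no gaps between adjacent ranges
    for (int i = 1; i < sorted.size(); i++) {
      if (sorted.get(i - 1).max() + 1 != sorted.get(i).min()) {
        throw new IllegalArgumentException("ranges overlap or leave a gap");
      }
    }
  }

  public static void main(String[] args) {
    Range parent = new Range(0, 99);
    validate(parent, List.of(new Range(50, 99), new Range(0, 49)));
    System.out.println("ok: contiguous and covering");
    try {
      validate(parent, List.of(new Range(0, 40), new Range(50, 99)));
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage()); // gap between 40 and 50
    }
  }
}
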
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/UtilizeNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/UtilizeNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/UtilizeNodeCmd.java
deleted file mode 100644
index 6a55cfd..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/UtilizeNodeCmd.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
-import org.apache.solr.client.solrj.request.V2Request;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.params.AutoScalingParams.NODE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class UtilizeNodeCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public UtilizeNodeCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ocmh.checkRequired(message, NODE);
-    String nodeName = message.getStr(NODE);
-    String async = message.getStr(ASYNC);
-    AutoScalingConfig autoScalingConfig = ocmh.overseer.getSolrCloudManager().getDistribStateManager().getAutoScalingConfig();
-
-    //first look for any violation that may use this replica
-    List<ZkNodeProps> requests = new ArrayList<>();
-    //first look for suggestions if any
-    List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(autoScalingConfig, ocmh.overseer.getSolrCloudManager());
-    for (Suggester.SuggestionInfo suggestionInfo : suggestions) {
-      log.info("op: " + suggestionInfo.getOperation());
-      String coll = null;
-      List<String> pieces = StrUtils.splitSmart(suggestionInfo.getOperation().getPath(), '/');
-      if (pieces.size() > 1) {
-        coll = pieces.get(2);
-      } else {
-        continue;
-      }
-      log.info("coll: " + coll);
-      if (suggestionInfo.getOperation() instanceof V2Request) {
-        String targetNode = (String) Utils.getObjectByPath(suggestionInfo.getOperation(), true, "command/move-replica/targetNode");
-        if (Objects.equals(targetNode, nodeName)) {
-          String replica = (String) Utils.getObjectByPath(suggestionInfo.getOperation(), true, "command/move-replica/replica");
-          requests.add(new ZkNodeProps(COLLECTION_PROP, coll,
-              CollectionParams.TARGET_NODE, targetNode,
-              ASYNC, async,
-              REPLICA_PROP, replica));
-        }
-      }
-    }
-    executeAll(requests);
-    PolicyHelper.SessionWrapper sessionWrapper = PolicyHelper.getSession(ocmh.overseer.getSolrCloudManager());
-    Policy.Session session =  sessionWrapper.get();
-    for (; ; ) {
-      Suggester suggester = session.getSuggester(MOVEREPLICA)
-          .hint(Suggester.Hint.TARGET_NODE, nodeName);
-      session = suggester.getSession();
-      SolrRequest request = suggester.getSuggestion();
-      if (request == null) break;
-      requests.add(new ZkNodeProps(COLLECTION_PROP, request.getParams().get(COLLECTION_PROP),
-          CollectionParams.TARGET_NODE, request.getParams().get(CollectionParams.TARGET_NODE),
-          REPLICA_PROP, request.getParams().get(REPLICA_PROP),
-          ASYNC, request.getParams().get(ASYNC)));
-    }
-    sessionWrapper.returnSession(session);
-    try {
-      executeAll(requests);
-    } finally {
-      sessionWrapper.release();
-    }
-  }
-
-  private void executeAll(List<ZkNodeProps> requests) throws Exception {
-    if (requests.isEmpty()) return;
-    for (ZkNodeProps props : requests) {
-      NamedList result = new NamedList();
-      ocmh.commandMap.get(MOVEREPLICA)
-          .call(ocmh.overseer.getSolrCloudManager().getClusterStateProvider().getClusterState(),
-              props,
-              result);
-    }
-    requests.clear();
-  }
-
-}

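The for(;;) loop in the deleted UtilizeNodeCmd above follows a simple drain pattern: keep asking the suggester for the next operation until it returns null, accumulating requests to execute afterwards. A stripped-down sketch of that pattern with a hypothetical Suggester interface standing in for Solr's policy suggester:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class SuggestionDrain {
  interface Suggester { String getSuggestion(); } // returns null when exhausted

  static List<String> drain(Suggester suggester) {
    List<String> requests = new ArrayList<>();
    for (;;) {
      String request = suggester.getSuggestion();
      if (request == null) break; // no more suggestions for this node
      requests.add(request);
    }
    return requests;
  }

  public static void main(String[] args) {
    Iterator<String> ops = List.of("MOVEREPLICA r1", "MOVEREPLICA r2").iterator();
    Suggester s = () -> ops.hasNext() ? ops.next() : null;
    System.out.println(drain(s)); // [MOVEREPLICA r1, MOVEREPLICA r2]
  }
}
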
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
new file mode 100644
index 0000000..6b4e427
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.cloud.ActiveReplicaWatcher;
+import org.apache.solr.cloud.CloudUtil;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.common.SolrCloseableLatch;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.ShardParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.component.ShardHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonAdminParams.TIMEOUT;
+import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
+
+public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public AddReplicaCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    addReplica(state, message, results, null);
+  }
+
+  ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
+      throws IOException, InterruptedException {
+    log.debug("addReplica() : {}", Utils.toJSONString(message));
+    boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
+    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
+    final String asyncId = message.getStr(ASYNC);
+
+    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
+    message = assignReplicaDetails(ocmh.cloudManager, clusterState, message, sessionWrapper);
+
+    String collection = message.getStr(COLLECTION_PROP);
+    DocCollection coll = clusterState.getCollection(collection);
+
+    String node = message.getStr(CoreAdminParams.NODE);
+    String shard = message.getStr(SHARD_ID_PROP);
+    String coreName = message.getStr(CoreAdminParams.NAME);
+    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
+    int timeout = message.getInt(TIMEOUT, 10 * 60); // 10 minutes
+    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
+    boolean parallel = message.getBool("parallel", false);
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    if (!Overseer.isLegacy(zkStateReader)) {
+      if (!skipCreateReplicaInClusterState) {
+        ZkNodeProps props = new ZkNodeProps(
+            Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
+            ZkStateReader.COLLECTION_PROP, collection,
+            ZkStateReader.SHARD_ID_PROP, shard,
+            ZkStateReader.CORE_NAME_PROP, coreName,
+            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
+            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(node),
+            ZkStateReader.NODE_NAME_PROP, node,
+            ZkStateReader.REPLICA_TYPE, replicaType.name());
+        if (coreNodeName != null) {
+          props = props.plus(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
+        }
+        try {
+          Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
+        } catch (Exception e) {
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Exception updating Overseer state queue", e);
+        }
+      }
+      params.set(CoreAdminParams.CORE_NODE_NAME,
+          ocmh.waitToSeeReplicasInState(collection, Collections.singletonList(coreName)).get(coreName).getName());
+    }
+
+    String configName = zkStateReader.readConfigName(collection);
+    String routeKey = message.getStr(ShardParams._ROUTE_);
+    String dataDir = message.getStr(CoreAdminParams.DATA_DIR);
+    String ulogDir = message.getStr(CoreAdminParams.ULOG_DIR);
+    String instanceDir = message.getStr(CoreAdminParams.INSTANCE_DIR);
+
+    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
+    params.set(CoreAdminParams.NAME, coreName);
+    params.set(COLL_CONF, configName);
+    params.set(CoreAdminParams.COLLECTION, collection);
+    params.set(CoreAdminParams.REPLICA_TYPE, replicaType.name());
+    if (shard != null) {
+      params.set(CoreAdminParams.SHARD, shard);
+    } else if (routeKey != null) {
+      Collection<Slice> slices = coll.getRouter().getSearchSlicesSingle(routeKey, null, coll);
+      if (slices.isEmpty()) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No active shard serving _route_=" + routeKey + " found");
+      } else {
+        params.set(CoreAdminParams.SHARD, slices.iterator().next().getName());
+      }
+    } else {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specify either 'shard' or _route_ param");
+    }
+    if (dataDir != null) {
+      params.set(CoreAdminParams.DATA_DIR, dataDir);
+    }
+    if (ulogDir != null) {
+      params.set(CoreAdminParams.ULOG_DIR, ulogDir);
+    }
+    if (instanceDir != null) {
+      params.set(CoreAdminParams.INSTANCE_DIR, instanceDir);
+    }
+    if (coreNodeName != null) {
+      params.set(CoreAdminParams.CORE_NODE_NAME, coreNodeName);
+    }
+    ocmh.addPropertyParams(message, params);
+
+    // For tracking async calls.
+    Map<String,String> requestMap = new HashMap<>();
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+
+    ocmh.sendShardRequest(node, params, shardHandler, asyncId, requestMap);
+
+    final String fnode = node;
+    final String fcoreName = coreName;
+
+    Runnable runnable = () -> {
+      ocmh.processResponses(results, shardHandler, true, "ADDREPLICA failed to create replica", asyncId, requestMap);
+      ocmh.waitForCoreNodeName(collection, fnode, fcoreName);
+      if (sessionWrapper.get() != null) {
+        sessionWrapper.get().release();
+      }
+      if (onComplete != null) onComplete.run();
+    };
+
+    if (!parallel || waitForFinalState) {
+      if (waitForFinalState) {
+        SolrCloseableLatch latch = new SolrCloseableLatch(1, ocmh);
+        ActiveReplicaWatcher watcher = new ActiveReplicaWatcher(collection, null, Collections.singletonList(coreName), latch);
+        try {
+          zkStateReader.registerCollectionStateWatcher(collection, watcher);
+          runnable.run();
+          if (!latch.await(timeout, TimeUnit.SECONDS)) {
+            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Timeout waiting " + timeout + " seconds for replica to become active.");
+          }
+        } finally {
+          zkStateReader.removeCollectionStateWatcher(collection, watcher);
+        }
+      } else {
+        runnable.run();
+      }
+    } else {
+      ocmh.tpe.submit(runnable);
+    }
+
+
+    return new ZkNodeProps(
+        ZkStateReader.COLLECTION_PROP, collection,
+        ZkStateReader.SHARD_ID_PROP, shard,
+        ZkStateReader.CORE_NAME_PROP, coreName,
+        ZkStateReader.NODE_NAME_PROP, node
+    );
+  }
+
+  public static ZkNodeProps assignReplicaDetails(SolrCloudManager cloudManager, ClusterState clusterState,
+                                                 ZkNodeProps message, AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
+    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
+
+    String collection = message.getStr(COLLECTION_PROP);
+    String node = message.getStr(CoreAdminParams.NODE);
+    String shard = message.getStr(SHARD_ID_PROP);
+    String coreName = message.getStr(CoreAdminParams.NAME);
+    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
+    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
+    if (StringUtils.isBlank(coreName)) {
+      coreName = message.getStr(CoreAdminParams.PROPERTY_PREFIX + CoreAdminParams.NAME);
+    }
+
+    DocCollection coll = clusterState.getCollection(collection);
+    if (coll == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
+    }
+    if (coll.getSlice(shard) == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+          "Collection: " + collection + " shard: " + shard + " does not exist");
+    }
+
+    // Kind of unnecessary, but it does put the logic of whether to override maxShardsPerNode in one place.
+    if (!skipCreateReplicaInClusterState) {
+      if (CloudUtil.usePolicyFramework(coll, cloudManager)) {
+        if (node == null) {
+          if(coll.getPolicyName() != null) message.getProperties().put(Policy.POLICY, coll.getPolicyName());
+          node = Assign.identifyNodes(cloudManager,
+              clusterState,
+              Collections.emptyList(),
+              collection,
+              message,
+              Collections.singletonList(shard),
+              replicaType == Replica.Type.NRT ? 0 : 1,
+              replicaType == Replica.Type.TLOG ? 0 : 1,
+              replicaType == Replica.Type.PULL ? 0 : 1
+          ).get(0).node;
+          sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
+        }
+      } else {
+        node = Assign.getNodesForNewReplicas(clusterState, collection, shard, 1, node,
+            cloudManager).get(0).nodeName;// TODO: use replica type in this logic too
+      }
+    }
+    log.info("Node Identified {} for creating new replica", node);
+
+    if (!clusterState.liveNodesContain(node)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
+    }
+    if (coreName == null) {
+      coreName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), coll, shard, replicaType);
+    } else if (!skipCreateReplicaInClusterState) {
+      //Validate that the core name is unique in that collection
+      for (Slice slice : coll.getSlices()) {
+        for (Replica replica : slice.getReplicas()) {
+          String replicaCoreName = replica.getStr(CORE_NAME_PROP);
+          if (coreName.equals(replicaCoreName)) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Another replica with the same core name already exists" +
+                " for this collection");
+          }
+        }
+      }
+    }
+    if (coreNodeName != null) {
+      message = message.plus(CoreAdminParams.CORE_NODE_NAME, coreNodeName);
+    }
+    message = message.plus(CoreAdminParams.NAME, coreName);
+    message = message.plus(CoreAdminParams.NODE, node);
+    return message;
+  }
+}
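
When waitForFinalState is set, the command above registers a collection state watcher before running the create request and then blocks on a latch until the replica reports ACTIVE or the timeout elapses. Below is a minimal sketch of that wait pattern using plain JDK types: CountDownLatch stands in for SolrCloseableLatch, and the ReplicaWatcher interface is a hypothetical stand-in for ActiveReplicaWatcher (both are assumptions for illustration, not the Solr API).

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// Sketch only: register the watcher first, then act, then await -- so a
// replica that becomes active very quickly cannot be missed.
public class WaitForActiveSketch {

  interface ReplicaWatcher { void onActive(); } // hypothetical stand-in

  static void runAndWait(Runnable createReplica, long timeoutSeconds)
      throws InterruptedException, TimeoutException {
    CountDownLatch latch = new CountDownLatch(1);
    ReplicaWatcher watcher = latch::countDown;  // fires once replica is ACTIVE
    try {
      createReplica.run();                      // send the core-create request
      simulateStateUpdate(watcher);             // stands in for a ZK watch event
      if (!latch.await(timeoutSeconds, TimeUnit.SECONDS)) {
        throw new TimeoutException("Timeout waiting for replica to become active");
      }
    } finally {
      // the real command removes the collection state watcher here
    }
  }

  private static void simulateStateUpdate(ReplicaWatcher w) {
    new Thread(w::onActive).start();
  }

  public static void main(String[] args) throws Exception {
    runAndWait(() -> System.out.println("ADDREPLICA sent"), 10);
    System.out.println("replica active");
  }
}

Registering the watcher before issuing the request is the important ordering: doing it the other way round leaves a window in which the ACTIVE state change could arrive before anyone is listening.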

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
new file mode 100644
index 0000000..e7ce583
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
@@ -0,0 +1,483 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
+import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
+import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
+import org.apache.solr.cloud.rule.ReplicaAssigner;
+import org.apache.solr.cloud.rule.Rule;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.util.NumberUtils;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
+import static org.apache.solr.common.cloud.DocCollection.SNITCH;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+
+public class Assign {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  public static int incAndGetId(DistribStateManager stateManager, String collection, int defaultValue) {
+    String path = "/collections/"+collection;
+    try {
+      if (!stateManager.hasData(path)) {
+        try {
+          stateManager.makePath(path);
+        } catch (AlreadyExistsException e) {
+          // it's okay if another request beats us to creating the node
+        }
+      }
+      path += "/counter";
+      if (!stateManager.hasData(path)) {
+        try {
+          stateManager.createData(path, NumberUtils.intToBytes(defaultValue), CreateMode.PERSISTENT);
+        } catch (AlreadyExistsException e) {
+          // it's okay if another request beats us to creating the node
+        }
+      }
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
+    } catch (IOException | KeeperException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
+    }
+
+    while (true) {
+      try {
+        int version = 0;
+        int currentId = 0;
+        VersionedData data = stateManager.getData(path, null);
+        if (data != null) {
+          currentId = NumberUtils.bytesToInt(data.getData());
+          version = data.getVersion();
+        }
+        byte[] bytes = NumberUtils.intToBytes(++currentId);
+        stateManager.setData(path, bytes, version);
+        return currentId;
+      } catch (BadVersionException e) {
+        continue;
+      } catch (IOException | KeeperException e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:"+collection, e);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:" + collection, e);
+      }
+    }
+  }
+
+  public static String assignCoreNodeName(DistribStateManager stateManager, DocCollection collection) {
+    // for backward compatibility
+    int defaultValue = defaultCounterValue(collection, false);
+    String coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
+    while (collection.getReplica(coreNodeName) != null) {
+      // there is a small chance that the new coreNodeName is not totally unique,
+      // but it is guaranteed to be unique for new collections
+      coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
+    }
+    return coreNodeName;
+  }
+
+  /**
+   * Assign a new unique id up to slices count - then add replicas evenly.
+   *
+   * @return the assigned shard id
+   */
+  public static String assignShard(DocCollection collection, Integer numShards) {
+    if (numShards == null) {
+      numShards = 1;
+    }
+    String returnShardId = null;
+    Map<String, Slice> sliceMap = collection != null ? collection.getActiveSlicesMap() : null;
+
+
+    // TODO: now that we create shards ahead of time, is this code needed?  Esp since hash ranges aren't assigned when creating via this method?
+
+    if (sliceMap == null) {
+      return "shard1";
+    }
+
+    List<String> shardIdNames = new ArrayList<>(sliceMap.keySet());
+
+    if (shardIdNames.size() < numShards) {
+      return "shard" + (shardIdNames.size() + 1);
+    }
+
+    // TODO: don't need to sort to find shard with fewest replicas!
+
+    // else figure out which shard needs more replicas
+    final Map<String, Integer> map = new HashMap<>();
+    for (String shardId : shardIdNames) {
+      int cnt = sliceMap.get(shardId).getReplicasMap().size();
+      map.put(shardId, cnt);
+    }
+
+    Collections.sort(shardIdNames, (String o1, String o2) -> {
+      Integer one = map.get(o1);
+      Integer two = map.get(o2);
+      return one.compareTo(two);
+    });
+
+    returnShardId = shardIdNames.get(0);
+    return returnShardId;
+  }
+
+  private static String buildSolrCoreName(String collectionName, String shard, Replica.Type type, int replicaNum) {
+    // TODO: Adding the suffix is great for debugging, but may be an issue if at some point we want to support a way to change replica type
+    return String.format(Locale.ROOT, "%s_%s_replica_%s%s", collectionName, shard, type.name().substring(0,1).toLowerCase(Locale.ROOT), replicaNum);
+  }
+
+  private static int defaultCounterValue(DocCollection collection, boolean newCollection) {
+    if (newCollection) return 0;
+    int defaultValue = collection.getReplicas().size();
+    if (collection.getReplicationFactor() != null) {
+      // numReplicas and replicationFactor * numSlices may not be equal
+      // when many addReplica or deleteReplica operations have been executed
+      defaultValue = Math.max(defaultValue,
+          collection.getReplicationFactor() * collection.getSlices().size());
+    }
+    return defaultValue * 20;
+  }
+
+  public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type, boolean newCollection) {
+    Slice slice = collection.getSlice(shard);
+    int defaultValue = defaultCounterValue(collection, newCollection);
+    int replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
+    String coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum);
+    while (existCoreName(coreName, slice)) {
+      replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
+      coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum);
+    }
+    return coreName;
+  }
+
+  public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type) {
+    return buildSolrCoreName(stateManager, collection, shard, type, false);
+  }
+
+  private static boolean existCoreName(String coreName, Slice slice) {
+    if (slice == null) return false;
+    for (Replica replica : slice.getReplicas()) {
+      if (coreName.equals(replica.getStr(CORE_NAME_PROP))) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  public static List<String> getLiveOrLiveAndCreateNodeSetList(final Set<String> liveNodes, final ZkNodeProps message, final Random random) {
+    // TODO: add smarter options that look at the current number of cores per
+    // node?
+    // for now we just go random (except when createNodeSet and createNodeSet.shuffle=false are passed in)
+
+    List<String> nodeList;
+
+    final String createNodeSetStr = message.getStr(CREATE_NODE_SET);
+    final List<String> createNodeList = (createNodeSetStr == null) ? null :
+        StrUtils.splitSmart((OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY.equals(createNodeSetStr) ?
+            "" : createNodeSetStr), ",", true);
+
+    if (createNodeList != null) {
+      nodeList = new ArrayList<>(createNodeList);
+      nodeList.retainAll(liveNodes);
+      if (message.getBool(OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE,
+          OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE_DEFAULT)) {
+        Collections.shuffle(nodeList, random);
+      }
+    } else {
+      nodeList = new ArrayList<>(liveNodes);
+      Collections.shuffle(nodeList, random);
+    }
+
+    return nodeList;
+  }
+
+  public static List<ReplicaPosition> identifyNodes(SolrCloudManager cloudManager,
+                                                    ClusterState clusterState,
+                                                    List<String> nodeList,
+                                                    String collectionName,
+                                                    ZkNodeProps message,
+                                                    List<String> shardNames,
+                                                    int numNrtReplicas,
+                                                    int numTlogReplicas,
+                                                    int numPullReplicas) throws IOException, InterruptedException {
+    List<Map> rulesMap = (List) message.get("rule");
+    String policyName = message.getStr(POLICY);
+    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
+
+    if (rulesMap == null && policyName == null && autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
+      log.debug("Identify nodes using default");
+      int i = 0;
+      List<ReplicaPosition> result = new ArrayList<>();
+      for (String aShard : shardNames)
+        for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
+            Replica.Type.TLOG, numTlogReplicas,
+            Replica.Type.PULL, numPullReplicas
+        ).entrySet()) {
+          for (int j = 0; j < e.getValue(); j++){
+            result.add(new ReplicaPosition(aShard, j, e.getKey(), nodeList.get(i % nodeList.size())));
+            i++;
+          }
+        }
+      return result;
+    } else {
+      if (numTlogReplicas + numPullReplicas != 0 && rulesMap != null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            Replica.Type.TLOG + " or " + Replica.Type.PULL + " replica types not supported with placement rules or cluster policies");
+      }
+    }
+
+    if (rulesMap != null && !rulesMap.isEmpty()) {
+      List<Rule> rules = new ArrayList<>();
+      for (Object map : rulesMap) rules.add(new Rule((Map) map));
+      Map<String, Integer> shardVsReplicaCount = new HashMap<>();
+
+      for (String shard : shardNames) shardVsReplicaCount.put(shard, numNrtReplicas);
+      ReplicaAssigner replicaAssigner = new ReplicaAssigner(rules,
+          shardVsReplicaCount,
+          (List<Map>) message.get(SNITCH),
+          new HashMap<>(), // this is a new collection, so there are no nodes in any shard
+          nodeList,
+          cloudManager,
+          clusterState);
+
+      Map<ReplicaPosition, String> nodeMappings = replicaAssigner.getNodeMappings();
+      return nodeMappings.entrySet().stream()
+          .map(e -> new ReplicaPosition(e.getKey().shard, e.getKey().index, e.getKey().type, e.getValue()))
+          .collect(Collectors.toList());
+    } else  {
+      if (message.getStr(CREATE_NODE_SET) == null)
+        nodeList = Collections.emptyList(); // unless explicitly specified, do not pass a node list to Policy
+      return getPositionsUsingPolicy(collectionName,
+          shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas, policyName, cloudManager, nodeList);
+    }
+  }
+
+  static class ReplicaCount {
+    public final String nodeName;
+    public int thisCollectionNodes = 0;
+    public int totalNodes = 0;
+
+    ReplicaCount(String nodeName) {
+      this.nodeName = nodeName;
+    }
+
+    public int weight() {
+      return (thisCollectionNodes * 100) + totalNodes;
+    }
+  }
+
+  // Only called from createShard and addReplica (so far).
+  //
+  // Gets a list of candidate nodes to put the required replica(s) on. Throws an error if not enough replicas
+  // can be created on live nodes given maxShardsPerNode, the replication factor (if from createShard), etc.
+  public static List<ReplicaCount> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
+                                                          String shard, int nrtReplicas,
+                                                          Object createNodeSet, SolrCloudManager cloudManager) throws IOException, InterruptedException {
+    log.debug("getNodesForNewReplicas() shard: {} , replicas : {} , createNodeSet {}", shard, nrtReplicas, createNodeSet );
+    DocCollection coll = clusterState.getCollection(collectionName);
+    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
+    List<String> createNodeList = null;
+
+    if (createNodeSet instanceof List) {
+      createNodeList = (List) createNodeSet;
+    } else {
+      createNodeList = createNodeSet == null ? null : StrUtils.splitSmart((String) createNodeSet, ",", true);
+    }
+
+    HashMap<String, ReplicaCount> nodeNameVsShardCount = getNodeNameVsShardCount(collectionName, clusterState, createNodeList);
+
+    if (createNodeList == null) { // We only care if we haven't been told to put new replicas on specific nodes.
+      int availableSlots = 0;
+      for (Map.Entry<String, ReplicaCount> ent : nodeNameVsShardCount.entrySet()) {
+        //ADDREPLICA can put more than maxShardsPerNode on an instance, so this test is necessary.
+        if (maxShardsPerNode > ent.getValue().thisCollectionNodes) {
+          availableSlots += (maxShardsPerNode - ent.getValue().thisCollectionNodes);
+        }
+      }
+      if (availableSlots < nrtReplicas) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            String.format(Locale.ROOT, "Cannot create %d new replicas for collection %s given the current number of live nodes and a maxShardsPerNode of %d",
+                nrtReplicas, collectionName, maxShardsPerNode));
+      }
+    }
+
+    List l = (List) coll.get(DocCollection.RULE);
+    List<ReplicaPosition> replicaPositions = null;
+    if (l != null) {
+      // TODO: make it so that this method doesn't require access to CC
+      replicaPositions = getNodesViaRules(clusterState, shard, nrtReplicas, cloudManager, coll, createNodeList, l);
+    }
+    String policyName = coll.getStr(POLICY);
+    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
+    if (policyName != null || !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
+      replicaPositions = Assign.getPositionsUsingPolicy(collectionName, Collections.singletonList(shard), nrtReplicas, 0, 0,
+          policyName, cloudManager, createNodeList);
+    }
+
+    if(replicaPositions != null){
+      List<ReplicaCount> repCounts = new ArrayList<>();
+      for (ReplicaPosition p : replicaPositions) {
+        repCounts.add(new ReplicaCount(p.node));
+      }
+      return repCounts;
+    }
+
+    ArrayList<ReplicaCount> sortedNodeList = new ArrayList<>(nodeNameVsShardCount.values());
+    Collections.sort(sortedNodeList, (x, y) -> (x.weight() < y.weight()) ? -1 : ((x.weight() == y.weight()) ? 0 : 1));
+    return sortedNodeList;
+
+  }
+
+  public static List<ReplicaPosition> getPositionsUsingPolicy(String collName, List<String> shardNames,
+                                                              int nrtReplicas,
+                                                              int tlogReplicas,
+                                                              int pullReplicas,
+                                                              String policyName, SolrCloudManager cloudManager,
+                                                              List<String> nodesList) throws IOException, InterruptedException {
+    log.debug("shardnames {} NRT {} TLOG {} PULL {} , policy {}, nodeList {}", shardNames, nrtReplicas, tlogReplicas, pullReplicas, policyName, nodesList);
+    List<ReplicaPosition> replicaPositions = null;
+    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
+    try {
+      Map<String, String> kvMap = Collections.singletonMap(collName, policyName);
+      replicaPositions = PolicyHelper.getReplicaLocations(
+          collName,
+          autoScalingConfig,
+          cloudManager,
+          kvMap,
+          shardNames,
+          nrtReplicas,
+          tlogReplicas,
+          pullReplicas,
+          nodesList);
+      return replicaPositions;
+    } catch (Exception e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error getting replica locations", e);
+    } finally {
+      if (log.isTraceEnabled()) {
+        if (replicaPositions != null)
+          log.trace("REPLICA_POSITIONS: " + Utils.toJSONString(Utils.getDeepCopy(replicaPositions, 7, true)));
+        log.trace("AUTOSCALING_CONF: " + Utils.toJSONString(autoScalingConfig));
+      }
+    }
+  }
+
+  private static List<ReplicaPosition> getNodesViaRules(ClusterState clusterState, String shard, int numberOfNodes,
+                                                        SolrCloudManager cloudManager, DocCollection coll, List<String> createNodeList, List l) {
+    ArrayList<Rule> rules = new ArrayList<>();
+    for (Object o : l) rules.add(new Rule((Map) o));
+    Map<String, Map<String, Integer>> shardVsNodes = new LinkedHashMap<>();
+    for (Slice slice : coll.getSlices()) {
+      LinkedHashMap<String, Integer> n = new LinkedHashMap<>();
+      shardVsNodes.put(slice.getName(), n);
+      for (Replica replica : slice.getReplicas()) {
+        Integer count = n.get(replica.getNodeName());
+        if (count == null) count = 0;
+        n.put(replica.getNodeName(), ++count);
+      }
+    }
+    List snitches = (List) coll.get(SNITCH);
+    List<String> nodesList = createNodeList == null ?
+        new ArrayList<>(clusterState.getLiveNodes()) :
+        createNodeList;
+    Map<ReplicaPosition, String> positions = new ReplicaAssigner(
+        rules,
+        Collections.singletonMap(shard, numberOfNodes),
+        snitches,
+        shardVsNodes,
+        nodesList, cloudManager, clusterState).getNodeMappings();
+
+    return positions.entrySet().stream().map(e -> e.getKey().setNode(e.getValue())).collect(Collectors.toList());// getReplicaCounts(positions);
+  }
+
+  private static HashMap<String, ReplicaCount> getNodeNameVsShardCount(String collectionName,
+                                                                       ClusterState clusterState, List<String> createNodeList) {
+    Set<String> nodes = clusterState.getLiveNodes();
+
+    List<String> nodeList = new ArrayList<>(nodes.size());
+    nodeList.addAll(nodes);
+    if (createNodeList != null) nodeList.retainAll(createNodeList);
+
+    HashMap<String, ReplicaCount> nodeNameVsShardCount = new HashMap<>();
+    for (String s : nodeList) {
+      nodeNameVsShardCount.put(s, new ReplicaCount(s));
+    }
+    if (createNodeList != null) { // Overrides petty considerations about maxShardsPerNode
+      if (createNodeList.size() != nodeNameVsShardCount.size()) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            "At least one of the node(s) specified " + createNodeList + " are not currently active in "
+                + nodeNameVsShardCount.keySet() + ", no action taken.");
+      }
+      return nodeNameVsShardCount;
+    }
+    DocCollection coll = clusterState.getCollection(collectionName);
+    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
+    Map<String, DocCollection> collections = clusterState.getCollectionsMap();
+    for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
+      DocCollection c = entry.getValue();
+      // identify suitable nodes by checking the number of cores on each of them
+      for (Slice slice : c.getSlices()) {
+        Collection<Replica> replicas = slice.getReplicas();
+        for (Replica replica : replicas) {
+          ReplicaCount count = nodeNameVsShardCount.get(replica.getNodeName());
+          if (count != null) {
+            count.totalNodes++; // Used to "weigh" whether this node should be used later.
+            if (entry.getKey().equals(collectionName)) {
+              count.thisCollectionNodes++;
+              if (count.thisCollectionNodes >= maxShardsPerNode) nodeNameVsShardCount.remove(replica.getNodeName());
+            }
+          }
+        }
+      }
+    }
+
+    return nodeNameVsShardCount;
+  }
+
+
+}
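
incAndGetId above is an optimistic compare-and-set loop against a versioned ZooKeeper node: read the value and its version, write value+1 conditioned on that version, and retry on BadVersionException when another writer got there first. A minimal sketch of the same loop, using an AtomicStampedReference as a stand-in for the versioned znode (an assumption for illustration; the real code goes through DistribStateManager):

import java.util.concurrent.atomic.AtomicStampedReference;

// Sketch only: the (value, stamp) pair models the (counter bytes, znode
// version) pair, and a failed compareAndSet models BadVersionException.
public class OptimisticCounterSketch {

  private final AtomicStampedReference<Integer> node =
      new AtomicStampedReference<>(0, 0);

  int incAndGet() {
    while (true) {
      int[] versionHolder = new int[1];
      int current = node.get(versionHolder);        // read value and version
      int next = current + 1;
      int version = versionHolder[0];
      if (node.compareAndSet(current, next, version, version + 1)) {
        return next;                                // our conditional write won
      }
      // another writer bumped the version first: loop and retry, like the
      // `continue` on BadVersionException in incAndGetId
    }
  }

  public static void main(String[] args) {
    OptimisticCounterSketch c = new OptimisticCounterSketch();
    System.out.println(c.incAndGet()); // 1
    System.out.println(c.incAndGet()); // 2
  }
}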


[16/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to its own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAliasCreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAliasCreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAliasCreateCollectionCmd.java
new file mode 100644
index 0000000..8cfd0bd
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAliasCreateCollectionCmd.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TimeZone;
+
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.OverseerSolrResponse;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.Aliases;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.handler.admin.CollectionsHandler;
+import org.apache.solr.request.LocalSolrQueryRequest;
+import org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor;
+import org.apache.solr.util.TimeZoneUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor.ROUTER_FIELD_METADATA;
+import static org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor.ROUTER_INTERVAL_METADATA;
+
+/**
+ * For "routed aliases", creates another collection and adds it to the alias. In some cases it will not
+ * add a new collection.
+ * If a collection is created, then collection creation info is returned.
+ *
+ * Note: this logic is within an Overseer because we want to leverage the mutual exclusion
+ * property afforded by the lock it obtains on the alias name.
+ * @since 7.3
+ */
+public class RoutedAliasCreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  public static final String IF_MOST_RECENT_COLL_NAME = "ifMostRecentCollName";
+
+  public static final String COLL_METAPREFIX = "collection-create.";
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public RoutedAliasCreateCollectionCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  /* TODO:
+  There are a few classes related to time routed alias processing.  We need to share some logic better.
+   */
+
+
+  @Override
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    //---- PARSE PRIMARY MESSAGE PARAMS
+    // important that we use NAME for the alias as that is what the Overseer will get a lock on before calling us
+    final String aliasName = message.getStr(NAME);
+    // the client believes this is the mostRecent collection name.  We assert this if provided.
+    final String ifMostRecentCollName = message.getStr(IF_MOST_RECENT_COLL_NAME); // optional
+
+    // TODO collection param (or intervalDateMath override?), useful for data capped collections
+
+    //---- PARSE ALIAS INFO FROM ZK
+    final ZkStateReader.AliasesManager aliasesHolder = ocmh.zkStateReader.aliasesHolder;
+    final Aliases aliases = aliasesHolder.getAliases();
+    final Map<String, String> aliasMetadata = aliases.getCollectionAliasMetadata(aliasName);
+    if (aliasMetadata == null) {
+      throw newAliasMustExistException(aliasName); // if it did exist, we'd have a non-null map
+    }
+
+    String routeField = aliasMetadata.get(ROUTER_FIELD_METADATA);
+    if (routeField == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+          "This command only works on time routed aliases.  Expected alias metadata not found.");
+    }
+    String intervalDateMath = aliasMetadata.getOrDefault(ROUTER_INTERVAL_METADATA, "+1DAY");
+    TimeZone intervalTimeZone = TimeZoneUtils.parseTimezone(aliasMetadata.get(CommonParams.TZ));
+
+    //TODO this is ugly; how can we organize the code related to this feature better?
+    final List<Map.Entry<Instant, String>> parsedCollections =
+        TimeRoutedAliasUpdateProcessor.parseCollections(aliasName, aliases, () -> newAliasMustExistException(aliasName));
+
+    //---- GET MOST RECENT COLL
+    final Map.Entry<Instant, String> mostRecentEntry = parsedCollections.get(0);
+    final Instant mostRecentCollTimestamp = mostRecentEntry.getKey();
+    final String mostRecentCollName = mostRecentEntry.getValue();
+    if (ifMostRecentCollName != null) {
+      if (!mostRecentCollName.equals(ifMostRecentCollName)) {
+        // Possibly due to race conditions in URPs on multiple leaders calling us at the same time
+        String msg = IF_MOST_RECENT_COLL_NAME + " expected " + ifMostRecentCollName + " but it's " + mostRecentCollName;
+        if (parsedCollections.stream().map(Map.Entry::getValue).noneMatch(ifMostRecentCollName::equals)) {
+          msg += ". Furthermore this collection isn't in the list of collections referenced by the alias.";
+        }
+        log.info(msg);
+        results.add("message", msg);
+        return;
+      }
+    } else if (mostRecentCollTimestamp.isAfter(Instant.now())) {
+      final String msg = "Most recent collection is in the future, so we won't create another.";
+      log.info(msg);
+      results.add("message", msg);
+      return;
+    }
+
+    //---- COMPUTE NEXT COLLECTION NAME
+    final Instant nextCollTimestamp = TimeRoutedAliasUpdateProcessor.computeNextCollTimestamp(mostRecentCollTimestamp, intervalDateMath, intervalTimeZone);
+    assert nextCollTimestamp.isAfter(mostRecentCollTimestamp);
+    final String createCollName = TimeRoutedAliasUpdateProcessor.formatCollectionNameFromInstant(aliasName, nextCollTimestamp);
+
+    //---- CREATE THE COLLECTION
+    // Map alias metadata starting with a prefix to a create-collection API request
+    final ModifiableSolrParams createReqParams = new ModifiableSolrParams();
+    for (Map.Entry<String, String> e : aliasMetadata.entrySet()) {
+      if (e.getKey().startsWith(COLL_METAPREFIX)) {
+        createReqParams.set(e.getKey().substring(COLL_METAPREFIX.length()), e.getValue());
+      }
+    }
+    if (createReqParams.get(COLL_CONF) == null) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+          "We require an explicit " + COLL_CONF );
+    }
+    createReqParams.set(NAME, createCollName);
+    createReqParams.set("property." + TimeRoutedAliasUpdateProcessor.TIME_PARTITION_ALIAS_NAME_CORE_PROP, aliasName);
+    // a CollectionOperation reads params and produces a message (Map) that is supposed to be sent to the Overseer.
+    //   Although we could create the Map without it, there are a fair number of rules we don't want to reproduce.
+    final Map<String, Object> createMsgMap = CollectionsHandler.CollectionOperation.CREATE_OP.execute(
+        new LocalSolrQueryRequest(null, createReqParams),
+        null,
+        ocmh.overseer.getCoreContainer().getCollectionsHandler());
+    createMsgMap.put(Overseer.QUEUE_OPERATION, "create");
+    // Since we are running in the Overseer here, send the message directly to the Overseer CreateCollectionCmd
+    ocmh.commandMap.get(CollectionParams.CollectionAction.CREATE).call(clusterState, new ZkNodeProps(createMsgMap), results);
+
+    CollectionsHandler.waitForActiveCollection(createCollName, null, ocmh.overseer.getCoreContainer(), new OverseerSolrResponse(results));
+
+    //TODO delete some of the oldest collection(s) ?
+
+    //---- UPDATE THE ALIAS
+    aliasesHolder.applyModificationAndExportToZk(curAliases -> {
+      final List<String> curTargetCollections = curAliases.getCollectionAliasListMap().get(aliasName);
+      if (curTargetCollections.contains(createCollName)) {
+        return curAliases;
+      } else {
+        List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size() + 1);
+        // prepend it on purpose (thus reverse sorted). Solr alias resolution defaults to the first collection in a list
+        newTargetCollections.add(createCollName);
+        newTargetCollections.addAll(curTargetCollections);
+        return curAliases.cloneWithCollectionAlias(aliasName, StrUtils.join(newTargetCollections, ','));
+      }
+    });
+
+  }
+
+  private SolrException newAliasMustExistException(String aliasName) {
+    return new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+        "Alias " + aliasName + " does not exist.");
+  }
+
+}
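
The alias-update step at the end of the command prepends the newly created collection only if it is not already referenced, so concurrent invocations converge on the same list and alias resolution keeps favouring the newest collection. A minimal sketch of that idempotent prepend (the method name and types here are illustrative assumptions, not the Solr API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Sketch only: mirrors the applyModificationAndExportToZk lambda above --
// return the input unchanged if the collection is already present, otherwise
// return a copy with the new collection prepended (newest first).
public class AliasPrependSketch {

  static List<String> withNewestFirst(List<String> current, String createCollName) {
    if (current.contains(createCollName)) {
      return current;                  // another leader already added it
    }
    List<String> updated = new ArrayList<>(current.size() + 1);
    updated.add(createCollName);       // alias resolution defaults to the first entry
    updated.addAll(current);
    return updated;
  }

  public static void main(String[] args) {
    List<String> cur = Arrays.asList("myalias_2018-01-02", "myalias_2018-01-01");
    System.out.println(withNewestFirst(cur, "myalias_2018-01-03"));
    // [myalias_2018-01-03, myalias_2018-01-02, myalias_2018-01-01]
    System.out.println(withNewestFirst(cur, "myalias_2018-01-02") == cur); // true
  }
}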

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
new file mode 100644
index 0000000..03e7430
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -0,0 +1,540 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.overseer.OverseerAction;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CompositeIdRouter;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.PlainIdRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.util.TestInjection;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+
+public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public SplitShardCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    split(state, message, results);
+  }
+
+  public boolean split(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
+    String collectionName = message.getStr(CoreAdminParams.COLLECTION);
+
+    log.info("Split shard invoked");
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    zkStateReader.forceUpdateCollection(collectionName);
+    AtomicReference<String> slice = new AtomicReference<>();
+    slice.set(message.getStr(ZkStateReader.SHARD_ID_PROP));
+
+    String splitKey = message.getStr("split.key");
+    DocCollection collection = clusterState.getCollection(collectionName);
+
+    PolicyHelper.SessionWrapper sessionWrapper = null;
+
+    Slice parentSlice = getParentSlice(clusterState, collectionName, slice, splitKey);
+
+    // find the leader for the shard
+    Replica parentShardLeader = null;
+    try {
+      parentShardLeader = zkStateReader.getLeaderRetry(collectionName, slice.get(), 10000);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+    }
+
+    // let's record the ephemeralOwner of the parent leader node
+    Stat leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
+    if (leaderZnodeStat == null)  {
+      // we just got to know the leader but its live node is gone already!
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
+    }
+
+    List<DocRouter.Range> subRanges = new ArrayList<>();
+    List<String> subSlices = new ArrayList<>();
+    List<String> subShardNames = new ArrayList<>();
+
+    String rangesStr = fillRanges(ocmh.cloudManager, message, collection, parentSlice, subRanges, subSlices, subShardNames);
+
+    try {
+
+      boolean oldShardsDeleted = false;
+      for (String subSlice : subSlices) {
+        Slice oSlice = collection.getSlice(subSlice);
+        if (oSlice != null) {
+          final Slice.State state = oSlice.getState();
+          if (state == Slice.State.ACTIVE) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                "Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
+          } else if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
+            // delete the shards
+            log.info("Sub-shard: {} already exists therefore requesting its deletion", subSlice);
+            Map<String, Object> propMap = new HashMap<>();
+            propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
+            propMap.put(COLLECTION_PROP, collectionName);
+            propMap.put(SHARD_ID_PROP, subSlice);
+            ZkNodeProps m = new ZkNodeProps(propMap);
+            try {
+              ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
+            } catch (Exception e) {
+              throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
+                  e);
+            }
+
+            oldShardsDeleted = true;
+          }
+        }
+      }
+
+      if (oldShardsDeleted) {
+        // refresh the locally cached cluster state
+        // we know we have the latest because otherwise deleteshard would have failed
+        clusterState = zkStateReader.getClusterState();
+        collection = clusterState.getCollection(collectionName);
+      }
+
+      final String asyncId = message.getStr(ASYNC);
+      Map<String, String> requestMap = new HashMap<>();
+      String nodeName = parentShardLeader.getNodeName();
+
+      for (int i = 0; i < subRanges.size(); i++) {
+        String subSlice = subSlices.get(i);
+        String subShardName = subShardNames.get(i);
+        DocRouter.Range subRange = subRanges.get(i);
+
+        log.info("Creating slice " + subSlice + " of collection " + collectionName + " on " + nodeName);
+
+        Map<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD.toLower());
+        propMap.put(ZkStateReader.SHARD_ID_PROP, subSlice);
+        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+        propMap.put(ZkStateReader.SHARD_RANGE_PROP, subRange.toString());
+        propMap.put(ZkStateReader.SHARD_STATE_PROP, Slice.State.CONSTRUCTION.toString());
+        propMap.put(ZkStateReader.SHARD_PARENT_PROP, parentSlice.getName());
+        propMap.put("shard_parent_node", parentShardLeader.getNodeName());
+        propMap.put("shard_parent_zk_session", leaderZnodeStat.getEphemeralOwner());
+        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+        inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
+
+        // wait until we are able to see the new shard in cluster state
+        ocmh.waitForNewShard(collectionName, subSlice);
+
+        // refresh cluster state
+        clusterState = zkStateReader.getClusterState();
+
+        log.info("Adding replica " + subShardName + " as part of slice " + subSlice + " of collection " + collectionName
+            + " on " + nodeName);
+        propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
+        propMap.put(COLLECTION_PROP, collectionName);
+        propMap.put(SHARD_ID_PROP, subSlice);
+        propMap.put("node", nodeName);
+        propMap.put(CoreAdminParams.NAME, subShardName);
+        propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+        // copy over property params:
+        for (String key : message.keySet()) {
+          if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
+            propMap.put(key, message.getStr(key));
+          }
+        }
+        // add async param
+        if (asyncId != null) {
+          propMap.put(ASYNC, asyncId);
+        }
+        ocmh.addReplica(clusterState, new ZkNodeProps(propMap), results, null);
+      }
+
+      ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+
+      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard leaders", asyncId, requestMap);
+
+      for (String subShardName : subShardNames) {
+        // wait for parent leader to acknowledge the sub-shard core
+        log.info("Asking parent leader to wait for: " + subShardName + " to be alive on: " + nodeName);
+        String coreNodeName = ocmh.waitForCoreNodeName(collectionName, nodeName, subShardName);
+        CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
+        cmd.setCoreName(subShardName);
+        cmd.setNodeName(nodeName);
+        cmd.setCoreNodeName(coreNodeName);
+        cmd.setState(Replica.State.ACTIVE);
+        cmd.setCheckLive(true);
+        cmd.setOnlyIfLeader(true);
+
+        ModifiableSolrParams p = new ModifiableSolrParams(cmd.getParams());
+        ocmh.sendShardRequest(nodeName, p, shardHandler, asyncId, requestMap);
+      }
+
+      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD timed out waiting for subshard leaders to come up",
+          asyncId, requestMap);
+
+      log.info("Successfully created all sub-shards for collection " + collectionName + " parent shard: " + slice
+          + " on: " + parentShardLeader);
+
+      log.info("Splitting shard " + parentShardLeader.getName() + " as part of slice " + slice + " of collection "
+          + collectionName + " on " + parentShardLeader);
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
+      params.set(CoreAdminParams.CORE, parentShardLeader.getStr("core"));
+      for (int i = 0; i < subShardNames.size(); i++) {
+        String subShardName = subShardNames.get(i);
+        params.add(CoreAdminParams.TARGET_CORE, subShardName);
+      }
+      params.set(CoreAdminParams.RANGES, rangesStr);
+
+      ocmh.sendShardRequest(parentShardLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
+
+      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to invoke SPLIT core admin command", asyncId,
+          requestMap);
+
+      log.info("Index on shard: " + nodeName + " split into two successfully");
+
+      // apply buffered updates on sub-shards
+      for (int i = 0; i < subShardNames.size(); i++) {
+        String subShardName = subShardNames.get(i);
+
+        log.info("Applying buffered updates on : " + subShardName);
+
+        params = new ModifiableSolrParams();
+        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
+        params.set(CoreAdminParams.NAME, subShardName);
+
+        ocmh.sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap);
+      }
+
+      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed while asking sub shard leaders" +
+          " to apply buffered updates", asyncId, requestMap);
+
+      log.info("Successfully applied buffered updates on : " + subShardNames);
+
+      // Replica creation for the new Slices
+
+      // look at the replication factor and see if it matches reality
+      // if it does not, find best nodes to create more cores
+
+      // TODO: Have replication factor decided in some other way instead of numShards for the parent
+
+      int repFactor = parentSlice.getReplicas().size();
+
+      // we need to look at every node and see how many cores it serves
+      // add our new cores to existing nodes serving the least number of cores
+      // but (for now) require that each core goes on a distinct node.
+
+      // TODO: add smarter options that look at the current number of cores per
+      // node?
+      // for now we just go random
+      Set<String> nodes = clusterState.getLiveNodes();
+      List<String> nodeList = new ArrayList<>(nodes.size());
+      nodeList.addAll(nodes);
+
+      // TODO: Have maxShardsPerNode param for this operation?
+
+      // Remove the node that hosts the parent shard for replica creation.
+      nodeList.remove(nodeName);
+
+      // TODO: change this to handle sharding a slice into > 2 sub-shards.
+
+      List<ReplicaPosition> replicaPositions = Assign.identifyNodes(ocmh.cloudManager,
+          clusterState,
+          new ArrayList<>(clusterState.getLiveNodes()),
+          collectionName,
+          new ZkNodeProps(collection.getProperties()),
+          subSlices, repFactor - 1, 0, 0);
+      sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
+
+      List<Map<String, Object>> replicas = new ArrayList<>((repFactor - 1) * 2);
+
+      for (ReplicaPosition replicaPosition : replicaPositions) {
+        String sliceName = replicaPosition.shard;
+        String subShardNodeName = replicaPosition.node;
+        String solrCoreName = collectionName + "_" + sliceName + "_replica" + (replicaPosition.index);
+
+        log.info("Creating replica shard " + solrCoreName + " as part of slice " + sliceName + " of collection "
+            + collectionName + " on " + subShardNodeName);
+
+        ZkNodeProps props = new ZkNodeProps(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
+            ZkStateReader.COLLECTION_PROP, collectionName,
+            ZkStateReader.SHARD_ID_PROP, sliceName,
+            ZkStateReader.CORE_NAME_PROP, solrCoreName,
+            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
+            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(subShardNodeName),
+            ZkStateReader.NODE_NAME_PROP, subShardNodeName,
+            CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+        Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
+
+        HashMap<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
+        propMap.put(COLLECTION_PROP, collectionName);
+        propMap.put(SHARD_ID_PROP, sliceName);
+        propMap.put("node", subShardNodeName);
+        propMap.put(CoreAdminParams.NAME, solrCoreName);
+        // copy over property params:
+        for (String key : message.keySet()) {
+          if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
+            propMap.put(key, message.getStr(key));
+          }
+        }
+        // add async param
+        if (asyncId != null) {
+          propMap.put(ASYNC, asyncId);
+        }
+        // special flag param to instruct addReplica not to create the replica in cluster state again
+        propMap.put(OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, "true");
+
+        propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+
+        replicas.add(propMap);
+      }
+
+      assert TestInjection.injectSplitFailureBeforeReplicaCreation();
+
+      long ephemeralOwner = leaderZnodeStat.getEphemeralOwner();
+      // compare against the ephemeralOwner of the parent leader node
+      leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
+      if (leaderZnodeStat == null || ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
+        // put sub-shards in recovery_failed state
+        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+        Map<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+        for (String subSlice : subSlices) {
+          propMap.put(subSlice, Slice.State.RECOVERY_FAILED.toString());
+        }
+        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+        ZkNodeProps m = new ZkNodeProps(propMap);
+        inQueue.offer(Utils.toJSON(m));
+
+        if (leaderZnodeStat == null)  {
+          // the leader is not live anymore, fail the split!
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
+        } else if (ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
+          // there's a new leader, fail the split!
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+              "The zk session id for the shard leader node: " + parentShardLeader.getNodeName() + " has changed from "
+                  + ephemeralOwner + " to " + leaderZnodeStat.getEphemeralOwner() + ". This can cause data loss so we must abort the split");
+        }
+      }
+
+      // we must set the slice state into recovery before actually creating the replica cores
+      // this ensures that the logic inside Overseer to update sub-shard state to 'active'
+      // always gets a chance to execute. See SOLR-7673
+
+      if (repFactor == 1) {
+        // switch sub shard states to 'active'
+        log.info("Replication factor is 1 so switching shard states");
+        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+        Map<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+        propMap.put(slice.get(), Slice.State.INACTIVE.toString());
+        for (String subSlice : subSlices) {
+          propMap.put(subSlice, Slice.State.ACTIVE.toString());
+        }
+        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+        ZkNodeProps m = new ZkNodeProps(propMap);
+        inQueue.offer(Utils.toJSON(m));
+      } else {
+        log.info("Requesting shard state be set to 'recovery'");
+        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+        Map<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+        for (String subSlice : subSlices) {
+          propMap.put(subSlice, Slice.State.RECOVERY.toString());
+        }
+        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+        ZkNodeProps m = new ZkNodeProps(propMap);
+        inQueue.offer(Utils.toJSON(m));
+      }
+
+      // now actually create replica cores on sub shard nodes
+      for (Map<String, Object> replica : replicas) {
+        ocmh.addReplica(clusterState, new ZkNodeProps(replica), results, null);
+      }
+
+      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard replicas", asyncId, requestMap);
+
+      log.info("Successfully created all replica shards for all sub-slices " + subSlices);
+
+      ocmh.commit(results, slice.get(), parentShardLeader);
+
+      return true;
+    } catch (SolrException e) {
+      throw e;
+    } catch (Exception e) {
+      log.error("Error executing split operation for collection: " + collectionName + " parent shard: " + slice, e);
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
+    } finally {
+      if (sessionWrapper != null) sessionWrapper.release();
+    }
+  }
+
+  public static Slice getParentSlice(ClusterState clusterState, String collectionName, AtomicReference<String> slice, String splitKey) {
+    DocCollection collection = clusterState.getCollection(collectionName);
+    DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
+
+    Slice parentSlice;
+
+    if (slice.get() == null) {
+      if (router instanceof CompositeIdRouter) {
+        Collection<Slice> searchSlices = router.getSearchSlicesSingle(splitKey, new ModifiableSolrParams(), collection);
+        if (searchSlices.isEmpty()) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to find an active shard for split.key: " + splitKey);
+        }
+        if (searchSlices.size() > 1) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+              "Splitting a split.key: " + splitKey + " which spans multiple shards is not supported");
+        }
+        parentSlice = searchSlices.iterator().next();
+        slice.set(parentSlice.getName());
+        log.info("Split by route.key: {}, parent shard is: {} ", splitKey, slice);
+      } else {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            "Split by route key can only be used with CompositeIdRouter or subclass. Found router: "
+                + router.getClass().getName());
+      }
+    } else {
+      parentSlice = collection.getSlice(slice.get());
+    }
+
+    if (parentSlice == null) {
+      // no chance of the collection being null because ClusterState#getCollection(String) would have thrown
+      // an exception already
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No shard with the specified name exists: " + slice);
+    }
+    return parentSlice;
+  }
+
+  public static String fillRanges(SolrCloudManager cloudManager, ZkNodeProps message, DocCollection collection, Slice parentSlice,
+                                List<DocRouter.Range> subRanges, List<String> subSlices, List<String> subShardNames) {
+    String splitKey = message.getStr("split.key");
+    DocRouter.Range range = parentSlice.getRange();
+    if (range == null) {
+      range = new PlainIdRouter().fullRange();
+    }
+    DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
+
+    String rangesStr = message.getStr(CoreAdminParams.RANGES);
+    if (rangesStr != null) {
+      String[] ranges = rangesStr.split(",");
+      if (ranges.length == 0 || ranges.length == 1) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least two ranges specified to split a shard");
+      } else {
+        for (int i = 0; i < ranges.length; i++) {
+          String r = ranges[i];
+          try {
+            subRanges.add(DocRouter.DEFAULT.fromString(r));
+          } catch (Exception e) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception in parsing hexadecimal hash range: " + r, e);
+          }
+          if (!subRanges.get(i).isSubsetOf(range)) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                "Specified hash range: " + r + " is not a subset of parent shard's range: " + range.toString());
+          }
+        }
+        List<DocRouter.Range> temp = new ArrayList<>(subRanges); // copy to preserve original order
+        Collections.sort(temp);
+        if (!range.equals(new DocRouter.Range(temp.get(0).min, temp.get(temp.size() - 1).max))) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+              "Specified hash ranges: " + rangesStr + " do not cover the entire range of parent shard: " + range);
+        }
+        for (int i = 1; i < temp.size(); i++) {
+          if (temp.get(i - 1).max + 1 != temp.get(i).min) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specified hash ranges: " + rangesStr
+                + " either overlap with each other or " + "do not cover the entire range of parent shard: " + range);
+          }
+        }
+      }
+    } else if (splitKey != null) {
+      if (router instanceof CompositeIdRouter) {
+        CompositeIdRouter compositeIdRouter = (CompositeIdRouter) router;
+        List<DocRouter.Range> tmpSubRanges = compositeIdRouter.partitionRangeByKey(splitKey, range);
+        if (tmpSubRanges.size() == 1) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey
+              + " has a hash range that is exactly equal to hash range of shard: " + parentSlice.getName());
+        }
+        for (DocRouter.Range subRange : tmpSubRanges) {
+          if (subRange.min == subRange.max) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey + " must be a compositeId");
+          }
+        }
+        subRanges.addAll(tmpSubRanges);
+        log.info("Partitioning parent shard " + parentSlice.getName() + " range: " + parentSlice.getRange() + " yields: " + subRanges);
+        rangesStr = "";
+        for (int i = 0; i < subRanges.size(); i++) {
+          DocRouter.Range subRange = subRanges.get(i);
+          rangesStr += subRange.toString();
+          if (i < subRanges.size() - 1) rangesStr += ',';
+        }
+      }
+    } else {
+      // todo: fixed to two partitions?
+      subRanges.addAll(router.partitionRange(2, range));
+    }
+
+    for (int i = 0; i < subRanges.size(); i++) {
+      String subSlice = parentSlice.getName() + "_" + i;
+      subSlices.add(subSlice);
+      String subShardName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), collection, subSlice, Replica.Type.NRT);
+      subShardNames.add(subShardName);
+    }
+    return rangesStr;
+  }
+}
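
A note for readers tracing fillRanges() above: when a caller supplies the "ranges" parameter, the method insists that the sub-ranges are proper subsets of the parent shard's hash range, are mutually contiguous, and jointly cover the parent range. A minimal standalone sketch of that arithmetic, with DocRouter.Range reduced to a hypothetical inclusive int pair (an illustration of the checks, not Solr's actual API):

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class RangeCheckSketch {
  // Hypothetical stand-in for DocRouter.Range: an inclusive [min, max] pair.
  static final class Range {
    final int min, max;
    Range(int min, int max) { this.min = min; this.max = max; }
  }

  // The same three checks fillRanges() performs on a user-supplied ranges list.
  static void validate(Range parent, List<Range> subs) {
    if (subs.size() < 2) {
      throw new IllegalArgumentException("at least two ranges are required to split a shard");
    }
    for (Range r : subs) {
      if (r.min < parent.min || r.max > parent.max) {
        throw new IllegalArgumentException("range is not a subset of the parent range");
      }
    }
    Range[] sorted = subs.toArray(new Range[0]);
    Arrays.sort(sorted, Comparator.comparingInt((Range r) -> r.min));
    if (sorted[0].min != parent.min || sorted[sorted.length - 1].max != parent.max) {
      throw new IllegalArgumentException("ranges do not cover the entire parent range");
    }
    for (int i = 1; i < sorted.length; i++) {
      if (sorted[i - 1].max + 1 != sorted[i].min) {
        throw new IllegalArgumentException("ranges overlap or leave a gap");
      }
    }
  }

  public static void main(String[] args) {
    validate(new Range(0, 99), Arrays.asList(new Range(0, 49), new Range(50, 99))); // passes
  }
}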

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java
new file mode 100644
index 0000000..60da61a
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
+import org.apache.solr.client.solrj.request.V2Request;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.params.AutoScalingParams.NODE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+public class UtilizeNodeCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public UtilizeNodeCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    ocmh.checkRequired(message, NODE);
+    String nodeName = message.getStr(NODE);
+    String async = message.getStr(ASYNC);
+    AutoScalingConfig autoScalingConfig = ocmh.overseer.getSolrCloudManager().getDistribStateManager().getAutoScalingConfig();
+
+    //first look for any violation that may use this replica
+    List<ZkNodeProps> requests = new ArrayList<>();
+    //first look for suggestions if any
+    List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(autoScalingConfig, ocmh.overseer.getSolrCloudManager());
+    for (Suggester.SuggestionInfo suggestionInfo : suggestions) {
+      log.info("op: " + suggestionInfo.getOperation());
+      String coll = null;
+      List<String> pieces = StrUtils.splitSmart(suggestionInfo.getOperation().getPath(), '/');
+      if (pieces.size() > 1) {
+        coll = pieces.get(2);
+      } else {
+        continue;
+      }
+      log.info("coll: " + coll);
+      if (suggestionInfo.getOperation() instanceof V2Request) {
+        String targetNode = (String) Utils.getObjectByPath(suggestionInfo.getOperation(), true, "command/move-replica/targetNode");
+        if (Objects.equals(targetNode, nodeName)) {
+          String replica = (String) Utils.getObjectByPath(suggestionInfo.getOperation(), true, "command/move-replica/replica");
+          requests.add(new ZkNodeProps(COLLECTION_PROP, coll,
+              CollectionParams.TARGET_NODE, targetNode,
+              ASYNC, async,
+              REPLICA_PROP, replica));
+        }
+      }
+    }
+    executeAll(requests);
+    PolicyHelper.SessionWrapper sessionWrapper = PolicyHelper.getSession(ocmh.overseer.getSolrCloudManager());
+    Policy.Session session = sessionWrapper.get();
+    for (;;) {
+      Suggester suggester = session.getSuggester(MOVEREPLICA)
+          .hint(Suggester.Hint.TARGET_NODE, nodeName);
+      session = suggester.getSession();
+      SolrRequest request = suggester.getSuggestion();
+      if (request == null) break;
+      requests.add(new ZkNodeProps(COLLECTION_PROP, request.getParams().get(COLLECTION_PROP),
+          CollectionParams.TARGET_NODE, request.getParams().get(CollectionParams.TARGET_NODE),
+          REPLICA_PROP, request.getParams().get(REPLICA_PROP),
+          ASYNC, request.getParams().get(ASYNC)));
+    }
+    sessionWrapper.returnSession(session);
+    try {
+      executeAll(requests);
+    } finally {
+      sessionWrapper.release();
+    }
+  }
+
+  private void executeAll(List<ZkNodeProps> requests) throws Exception {
+    if (requests.isEmpty()) return;
+    for (ZkNodeProps props : requests) {
+      NamedList result = new NamedList();
+      ocmh.commandMap.get(MOVEREPLICA)
+          .call(ocmh.overseer.getSolrCloudManager().getClusterStateProvider().getClusterState(),
+              props,
+              result);
+    }
+    requests.clear();
+  }
+
+}
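
The heart of UtilizeNodeCmd.call() is the drain loop above: keep asking the policy session for another MOVEREPLICA suggestion hinted at the target node until it returns null. The same pattern in isolation, with the autoscaling Suggester swapped for a plain Supplier (a sketch of the control flow, not the Solr API):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;

public class DrainSketch {
  // suggester.get() stands in for suggester.getSuggestion(); a null return
  // means no further move would improve the cluster layout.
  static <T> List<T> drainSuggestions(Supplier<T> suggester) {
    List<T> moves = new ArrayList<>();
    for (;;) {
      T next = suggester.get();
      if (next == null) break;
      moves.add(next);
    }
    return moves;
  }
}

Note that the real command also refreshes the session on each pass (suggester.getSession()), so every suggestion accounts for the moves already queued.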

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java
new file mode 100644
index 0000000..651d4fe
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+/** 
+ * Package related to internal implementations of the SolrCloud collections api
+ */
+package org.apache.solr.cloud.api.collections;
+
+

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
index 55d6a7e..e5303de 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
@@ -26,7 +26,7 @@ import java.util.Map;
 
 import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
index dbcdd3d..f2c9a2f 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
@@ -17,7 +17,6 @@
 package org.apache.solr.cloud.overseer;
 
 import java.lang.invoke.MethodHandles;
-
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
@@ -31,9 +30,9 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.Assign;
 import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.Assign;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
@@ -45,7 +44,6 @@ import org.apache.solr.common.util.Utils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
 import static org.apache.solr.cloud.overseer.CollectionMutator.checkCollectionKeyExistence;
 import static org.apache.solr.cloud.overseer.CollectionMutator.checkKeyExistence;
 import static org.apache.solr.common.params.CommonParams.NAME;
@@ -113,7 +111,7 @@ public class ReplicaMutator {
     String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
     String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
     String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
-    if (StringUtils.startsWith(property, COLL_PROP_PREFIX) == false) {
+    if (StringUtils.startsWith(property, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
       property = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + property;
     }
     property = property.toLowerCase(Locale.ROOT);
@@ -177,7 +175,7 @@ public class ReplicaMutator {
     String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
     String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
     String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
-    if (StringUtils.startsWith(property, COLL_PROP_PREFIX) == false) {
+    if (StringUtils.startsWith(property, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
       property = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + property;
     }
 
@@ -284,7 +282,7 @@ public class ReplicaMutator {
         replicaProps.put(ZkStateReader.REPLICA_TYPE, oldReplica.getType().toString());
         // Move custom props over.
         for (Map.Entry<String, Object> ent : oldReplica.getProperties().entrySet()) {
-          if (ent.getKey().startsWith(COLL_PROP_PREFIX)) {
+          if (ent.getKey().startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
             replicaProps.put(ent.getKey(), ent.getValue());
           }
         }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
index 6718a80..87bf481 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
@@ -16,20 +16,18 @@
  */
 package org.apache.solr.cloud.overseer;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
-import static org.apache.solr.cloud.overseer.CollectionMutator.checkCollectionKeyExistence;
-import static org.apache.solr.common.util.Utils.makeMap;
-
 import java.lang.invoke.MethodHandles;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
 
+import com.google.common.collect.ImmutableSet;
 import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.cloud.Assign;
 import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.api.collections.Assign;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
@@ -41,12 +39,13 @@ import org.apache.solr.common.cloud.ZkStateReader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.ImmutableSet;
+import static org.apache.solr.cloud.overseer.CollectionMutator.checkCollectionKeyExistence;
+import static org.apache.solr.common.util.Utils.makeMap;
 
 public class SliceMutator {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  public static final String PREFERRED_LEADER_PROP = COLL_PROP_PREFIX + "preferredleader";
+  public static final String PREFERRED_LEADER_PROP = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + "preferredleader";
 
   public static final Set<String> SLICE_UNIQUE_BOOLEAN_PROPERTIES = ImmutableSet.of(PREFERRED_LEADER_PROP);
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 74d4764..56f979d 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -42,7 +42,7 @@ import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestSyncShard;
 import org.apache.solr.client.solrj.response.RequestStatusState;
 import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
 import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.cloud.OverseerSolrResponse;
 import org.apache.solr.cloud.OverseerTaskQueue;
 import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
@@ -100,17 +100,17 @@ import static org.apache.solr.client.solrj.response.RequestStatusState.NOT_FOUND
 import static org.apache.solr.client.solrj.response.RequestStatusState.RUNNING;
 import static org.apache.solr.client.solrj.response.RequestStatusState.SUBMITTED;
 import static org.apache.solr.cloud.Overseer.QUEUE_OPERATION;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.ONLY_IF_DOWN;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.REQUESTID;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARD_UNIQUE;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.NUM_SLICES;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_IF_DOWN;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.REQUESTID;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARDS_PROP;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARD_UNIQUE;
 import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST;
 import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
 import static org.apache.solr.common.cloud.DocCollection.RULE;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java
index bc242ba..6f71acc 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java
@@ -42,7 +42,7 @@ import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
 import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.RoutedAliasCreateCollectionCmd;
+import org.apache.solr.cloud.api.collections.RoutedAliasCreateCollectionCmd;
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.Aliases;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
deleted file mode 100644
index a90783a..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Random;
-import java.util.TreeMap;
-
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest.ClusterProp;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the logic required to test Solr cloud backup/restore capability.
- */
-public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  protected static final int NUM_SHARDS = 2;//granted we sometimes shard split to get more
-
-  int replFactor;
-  int numTlogReplicas;
-  int numPullReplicas;
-
-  private static long docsSeed; // see indexDocs()
-
-  @BeforeClass
-  public static void createCluster() throws Exception {
-    docsSeed = random().nextLong();
-  }
-
-  /**
-   * @return The name of the collection to use.
-   */
-  public abstract String getCollectionName();
-
-  /**
-   * @return The name of the backup repository to use.
-   */
-  public abstract String getBackupRepoName();
-
-  /**
-   * @return The absolute path for the backup location.
-   *         Could return null.
-   */
-  public abstract String getBackupLocation();
-
-  @Test
-  public void test() throws Exception {
-    boolean isImplicit = random().nextBoolean();
-    boolean doSplitShardOperation = !isImplicit && random().nextBoolean();
-    replFactor = TestUtil.nextInt(random(), 1, 2);
-    numTlogReplicas = TestUtil.nextInt(random(), 0, 1);
-    numPullReplicas = TestUtil.nextInt(random(), 0, 1);
-    
-    CollectionAdminRequest.Create create = isImplicit ?
-      // NOTE: use shard list with same # of shards as NUM_SHARDS; we assume this later
-      CollectionAdminRequest.createCollectionWithImplicitRouter(getCollectionName(), "conf1", "shard1,shard2", replFactor, numTlogReplicas, numPullReplicas) :
-      CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor, numTlogReplicas, numPullReplicas);
-    
-    if (NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) > cluster.getJettySolrRunners().size() || random().nextBoolean()) {
-      create.setMaxShardsPerNode((int)Math.ceil(NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) / cluster.getJettySolrRunners().size()));//just to assert it survives the restoration
-      if (doSplitShardOperation) {
-        create.setMaxShardsPerNode(create.getMaxShardsPerNode() * 2);
-      }
-    }
-    if (random().nextBoolean()) {
-      create.setAutoAddReplicas(true);//just to assert it survives the restoration
-    }
-    Properties coreProps = new Properties();
-    coreProps.put("customKey", "customValue");//just to assert it survives the restoration
-    create.setProperties(coreProps);
-    if (isImplicit) { //implicit router
-      create.setRouterField("shard_s");
-    } else {//composite id router
-      if (random().nextBoolean()) {
-        create.setRouterField("shard_s");
-      }
-    }
-
-    CloudSolrClient solrClient = cluster.getSolrClient();
-    create.process(solrClient);
-
-    indexDocs(getCollectionName());
-
-    if (doSplitShardOperation) {
-      // shard split the first shard
-      int prevActiveSliceCount = getActiveSliceCount(getCollectionName());
-      CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(getCollectionName());
-      splitShard.setShardName("shard1");
-      splitShard.process(solrClient);
-      // wait until we see one more active slice...
-      for (int i = 0; getActiveSliceCount(getCollectionName()) != prevActiveSliceCount + 1; i++) {
-        assertTrue(i < 30);
-        Thread.sleep(500);
-      }
-      // issue a hard commit.  Split shard does a soft commit which isn't good enough for the backup/snapshooter to see
-      solrClient.commit(getCollectionName());
-    }
-
-    testBackupAndRestore(getCollectionName());
-    testConfigBackupOnly("conf1", getCollectionName());
-    testInvalidPath(getCollectionName());
-  }
-
-  /**
-   * This test validates the backup of collection configuration using
-   *  {@linkplain CollectionAdminParams#NO_INDEX_BACKUP_STRATEGY}.
-   *
-   * @param configName The config name for the collection to be backed up.
-   * @param collectionName The name of the collection to be backed up.
-   * @throws Exception in case of errors.
-   */
-  protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
-    // This is deliberately no-op since we want to run this test only for one of the backup repository
-    // implementation (mainly to avoid redundant test execution). Currently HDFS backup repository test
-    // implements this.
-  }
-
-  // This test verifies the system behavior when the backup location cluster property is configured with an invalid
-  // value for the specified repository (and the default backup location is not configured in solr.xml).
-  private void testInvalidPath(String collectionName) throws Exception {
-    // Execute this test only if the default backup location is NOT configured in solr.xml
-    if (getBackupLocation() == null) {
-      return;
-    }
-
-    String backupName = "invalidbackuprequest";
-    CloudSolrClient solrClient = cluster.getSolrClient();
-
-    ClusterProp req = CollectionAdminRequest.setClusterProperty(CoreAdminParams.BACKUP_LOCATION, "/location/does/not/exist");
-    assertEquals(0, req.process(solrClient).getStatus());
-
-    // Do not specify the backup location.
-    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
-        .setRepositoryName(getBackupRepoName());
-    try {
-      backup.process(solrClient);
-      fail("This request should have failed since the cluster property value for backup location property is invalid.");
-    } catch (SolrException ex) {
-      assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
-    }
-
-    String restoreCollectionName = collectionName + "_invalidrequest";
-    CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
-        .setRepositoryName(getBackupRepoName());
-    try {
-      restore.process(solrClient);
-      fail("This request should have failed since the cluster property value for backup location property is invalid.");
-    } catch (SolrException ex) {
-      assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
-    }
-  }
-
-  private int getActiveSliceCount(String collectionName) {
-    return cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(collectionName).getActiveSlices().size();
-  }
-
-  private void indexDocs(String collectionName) throws Exception {
-    Random random = new Random(docsSeed);// use a constant seed for the whole test run so that we can easily re-index.
-    int numDocs = random.nextInt(100);
-    if (numDocs == 0) {
-      log.info("Indexing ZERO test docs");
-      return;
-    }
-    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
-    for (int i=0; i<numDocs; i++) {
-      SolrInputDocument doc = new SolrInputDocument();
-      doc.addField("id", i);
-      doc.addField("shard_s", "shard" + (1 + random.nextInt(NUM_SHARDS))); // for implicit router
-      docs.add(doc);
-    }
-    CloudSolrClient client = cluster.getSolrClient();
-    client.add(collectionName, docs);// batch
-    client.commit(collectionName);
-  }
-
-  private void testBackupAndRestore(String collectionName) throws Exception {
-    String backupLocation = getBackupLocation();
-    String backupName = "mytestbackup";
-
-    CloudSolrClient client = cluster.getSolrClient();
-    DocCollection backupCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
-
-    Map<String, Integer> origShardToDocCount = getShardToDocCountMap(client, backupCollection);
-    assert origShardToDocCount.isEmpty() == false;
-
-    log.info("Triggering Backup command");
-
-    {
-      CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
-          .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
-      if (random().nextBoolean()) {
-        assertEquals(0, backup.process(client).getStatus());
-      } else {
-        assertEquals(RequestStatusState.COMPLETED, backup.processAndWait(client, 30));//async
-      }
-    }
-
-    log.info("Triggering Restore command");
-
-    String restoreCollectionName = collectionName + "_restored";
-    boolean sameConfig = random().nextBoolean();
-
-    {
-      CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
-          .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
-
-
-      //explicitly specify the replicationFactor/pullReplicas/nrtReplicas/tlogReplicas .
-      //Value is still the same as the original. maybe test with different values that the original for better test coverage
-      if (random().nextBoolean())  {
-        restore.setReplicationFactor(replFactor);
-      }
-      if (backupCollection.getReplicas().size() > cluster.getJettySolrRunners().size()) {
-        // may need to increase maxShardsPerNode (e.g. if it was shard split, then now we need more)
-        restore.setMaxShardsPerNode((int)Math.ceil(backupCollection.getReplicas().size()/cluster.getJettySolrRunners().size()));
-      }
-      
-
-      if (rarely()) { // Try with createNodeSet configuration
-        int nodeSetSize = cluster.getJettySolrRunners().size() / 2;
-        List<String> nodeStrs = new ArrayList<>(nodeSetSize);
-        Iterator<JettySolrRunner> iter = cluster.getJettySolrRunners().iterator();
-        for (int i = 0; i < nodeSetSize ; i++) {
-          nodeStrs.add(iter.next().getNodeName());
-        }
-        restore.setCreateNodeSet(String.join(",", nodeStrs));
-        restore.setCreateNodeSetShuffle(usually());
-        // we need to double maxShardsPerNode value since we reduced number of available nodes by half.
-        if (restore.getMaxShardsPerNode() != null) {
-          restore.setMaxShardsPerNode(restore.getMaxShardsPerNode() * 2);
-        } else {
-          restore.setMaxShardsPerNode(origShardToDocCount.size() * 2);
-        }
-      }
-
-      Properties props = new Properties();
-      props.setProperty("customKey", "customVal");
-      restore.setProperties(props);
-
-      if (sameConfig==false) {
-        restore.setConfigName("customConfigName");
-      }
-      if (random().nextBoolean()) {
-        assertEquals(0, restore.process(client).getStatus());
-      } else {
-        assertEquals(RequestStatusState.COMPLETED, restore.processAndWait(client, 30));//async
-      }
-      AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-          restoreCollectionName, cluster.getSolrClient().getZkStateReader(), log.isDebugEnabled(), true, 30);
-    }
-
-    //Check the number of results are the same
-    DocCollection restoreCollection = client.getZkStateReader().getClusterState().getCollection(restoreCollectionName);
-    assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
-    //Re-index same docs (should be identical docs given same random seed) and test we have the same result.  Helps
-    //  test we reconstituted the hash ranges / doc router.
-    if (!(restoreCollection.getRouter() instanceof ImplicitDocRouter) && random().nextBoolean()) {
-      indexDocs(restoreCollectionName);
-      assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
-    }
-
-    assertEquals(backupCollection.getReplicationFactor(), restoreCollection.getReplicationFactor());
-    assertEquals(backupCollection.getAutoAddReplicas(), restoreCollection.getAutoAddReplicas());
-    assertEquals(backupCollection.getActiveSlices().iterator().next().getReplicas().size(),
-        restoreCollection.getActiveSlices().iterator().next().getReplicas().size());
-    assertEquals(sameConfig ? "conf1" : "customConfigName",
-        cluster.getSolrClient().getZkStateReader().readConfigName(restoreCollectionName));
-
-    Map<String, Integer> numReplicasByNodeName = new HashMap<>();
-    restoreCollection.getReplicas().forEach(x -> {
-      numReplicasByNodeName.put(x.getNodeName(), numReplicasByNodeName.getOrDefault(x.getNodeName(), 0) + 1);
-    });
-    numReplicasByNodeName.forEach((k, v) -> {
-      assertTrue("Node " + k + " has " + v + " replicas. Expected num replicas : " + restoreCollection.getMaxShardsPerNode() ,
-          v <= restoreCollection.getMaxShardsPerNode());
-    });
-
-    assertEquals("Different count of nrtReplicas. Backup collection state=" + backupCollection + "\nRestore " +
-        "collection state=" + restoreCollection, replFactor, restoreCollection.getNumNrtReplicas().intValue());
-    assertEquals("Different count of pullReplicas. Backup collection state=" + backupCollection + "\nRestore" +
-        " collection state=" + restoreCollection, numPullReplicas, restoreCollection.getNumPullReplicas().intValue());
-    assertEquals("Different count of TlogReplica. Backup collection state=" + backupCollection + "\nRestore" +
-        " collection state=" + restoreCollection, numTlogReplicas, restoreCollection.getNumTlogReplicas().intValue());
-
-    assertEquals("Restore collection should use stateFormat=2", 2, restoreCollection.getStateFormat());
-
-
-    // assert added core properties:
-    // DWS: did via manual inspection.
-    // TODO Find the applicable core.properties on the file system but how?
-  }
-
-  private Map<String, Integer> getShardToDocCountMap(CloudSolrClient client, DocCollection docCollection) throws SolrServerException, IOException {
-    Map<String,Integer> shardToDocCount = new TreeMap<>();
-    for (Slice slice : docCollection.getActiveSlices()) {
-      String shardName = slice.getName();
-      try (HttpSolrClient leaderClient = new HttpSolrClient.Builder(slice.getLeader().getCoreUrl()).withHttpClient(client.getHttpClient()).build()) {
-        long docsInShard = leaderClient.query(new SolrQuery("*:*").setParam("distrib", "false"))
-            .getResults().getNumFound();
-        shardToDocCount.put(shardName, (int) docsInShard);
-      }
-    }
-    return shardToDocCount;
-  }
-}
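
This deletion is part of the same package reorganization: the backup/restore test presumably reappears under org.apache.solr.cloud.api.collections elsewhere in this patch. For reference, the round trip it exercised reduces to the following SolrJ calls, lifted from the deleted body above (collection, backup, location, and repository names here are placeholders):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class BackupRestoreSketch {
  // client points at a running SolrCloud cluster; all names are placeholders.
  static void roundTrip(CloudSolrClient client) throws Exception {
    // back up the collection to a shared filesystem location
    CollectionAdminRequest.backupCollection("myColl", "myBackup")
        .setLocation("/path/to/backups")
        .setRepositoryName("myRepo")
        .process(client);

    // restore into a new collection; processAndWait() submits async and polls
    CollectionAdminRequest.restoreCollection("myColl_restored", "myBackup")
        .setLocation("/path/to/backups")
        .setRepositoryName("myRepo")
        .processAndWait(client, 30);
  }
}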

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AssignTest.java b/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
deleted file mode 100644
index cf26de4..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.client.solrj.impl.ZkDistribStateManager;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class AssignTest extends SolrTestCaseJ4 {
-  
-  @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-
-  }
-  
-  @Override
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
-  }
-  
-  @Test
-  public void testAssignNode() throws Exception {
-    assumeWorkingMockito();
-    
-    SolrZkClient zkClient = mock(SolrZkClient.class);
-    Map<String, byte[]> zkClientData = new HashMap<>();
-    when(zkClient.setData(anyString(), any(), anyInt(), anyBoolean())).then(invocation -> {
-        zkClientData.put(invocation.getArgument(0), invocation.getArgument(1));
-        return null;
-      }
-    );
-    when(zkClient.getData(anyString(), any(), any(), anyBoolean())).then(invocation ->
-        zkClientData.get(invocation.getArgument(0)));
-    // TODO: fix this to be independent of ZK
-    ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
-    String nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
-    assertEquals("core_node1", nodeName);
-    nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection2", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
-    assertEquals("core_node1", nodeName);
-    nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
-    assertEquals("core_node2", nodeName);
-  }
-
-  @Test
-  public void testIdIsUnique() throws Exception {
-    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
-    ZkTestServer server = new ZkTestServer(zkDir);
-    Object fixedValue = new Object();
-    String[] collections = new String[]{"c1","c2","c3","c4","c5","c6","c7","c8","c9"};
-    Map<String, ConcurrentHashMap<Integer, Object>> collectionUniqueIds = new HashMap<>();
-    for (String c : collections) {
-      collectionUniqueIds.put(c, new ConcurrentHashMap<>());
-    }
-
-    ExecutorService executor = ExecutorUtil.newMDCAwareCachedThreadPool("threadpool");
-    try {
-      server.run();
-
-      try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
-        assertTrue(zkClient.isConnected());
-        zkClient.makePath("/", true);
-        for (String c : collections) {
-          zkClient.makePath("/collections/"+c, true);
-        }
-        // TODO: fix this to be independent of ZK
-        ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
-        List<Future<?>> futures = new ArrayList<>();
-        for (int i = 0; i < 1000; i++) {
-          futures.add(executor.submit(() -> {
-            String collection = collections[random().nextInt(collections.length)];
-            int id = Assign.incAndGetId(stateManager, collection, 0);
-            Object val = collectionUniqueIds.get(collection).put(id, fixedValue);
-            if (val != null) {
-              fail("ZkController do not generate unique id for " + collection);
-            }
-          }));
-        }
-        for (Future<?> future : futures) {
-          future.get();
-        }
-      }
-      assertEquals(1000, (long) collectionUniqueIds.values().stream()
-          .map(ConcurrentHashMap::size)
-          .reduce((m1, m2) -> m1 + m2).get());
-    } finally {
-      server.shutdown();
-      ExecutorUtil.shutdownAndAwaitTermination(executor);
-    }
-  }
-
-
-  @Test
-  public void testBuildCoreName() throws IOException, InterruptedException, KeeperException {
-    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
-    ZkTestServer server = new ZkTestServer(zkDir);
-    server.run();
-    try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
-      zkClient.makePath("/", true);
-      // TODO: fix this to be independent of ZK
-      ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
-      Map<String, Slice> slices = new HashMap<>();
-      slices.put("shard1", new Slice("shard1", new HashMap<>(), null));
-      slices.put("shard2", new Slice("shard2", new HashMap<>(), null));
-
-      DocCollection docCollection = new DocCollection("collection1", slices, null, DocRouter.DEFAULT);
-      assertEquals("Core name pattern changed", "collection1_shard1_replica_n1", Assign.buildSolrCoreName(stateManager, docCollection, "shard1", Replica.Type.NRT));
-      assertEquals("Core name pattern changed", "collection1_shard2_replica_p2", Assign.buildSolrCoreName(stateManager, docCollection, "shard2", Replica.Type.PULL));
-    } finally {
-      server.shutdown();
-    }
-  }
-  
-}
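
(Not part of the commit: a minimal sketch of the behavior the deleted test
exercised, assuming a live SolrZkClient such as the one ZkTestServer provides.
The id counter behind assignCoreNodeName is scoped per collection, which is
why two collections can each own a "core_node1".)

    // Sketch only -- mirrors the deleted test above, not shipped code.
    ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
    DocCollection coll = new DocCollection("collection1",
        new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT);
    String first = Assign.assignCoreNodeName(stateManager, coll);  // "core_node1"
    String second = Assign.assignCoreNodeName(stateManager, coll); // "core_node2"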

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index 66b7866..2190c80 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -37,6 +37,7 @@ import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.UpdateResponse;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
index 1a01386..22862b4 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
@@ -24,6 +24,11 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.ElectionContext;
+import org.apache.solr.cloud.LeaderElector;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.api.collections.ShardSplitTest;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java
deleted file mode 100644
index e886bb6..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.util.RetryUtil;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Verifies cluster state remains consistent after collection reload.
- */
-@SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
-public class CollectionReloadTest extends SolrCloudTestCase {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(1)
-        .addConfig("conf", configset("cloud-minimal"))
-        .configure();
-  }
-  
-  @Test
-  public void testReloadedLeaderStateAfterZkSessionLoss() throws Exception {
-
-    log.info("testReloadedLeaderStateAfterZkSessionLoss initialized OK ... running test logic");
-
-    final String testCollectionName = "c8n_1x1";
-    CollectionAdminRequest.createCollection(testCollectionName, "conf", 1, 1)
-        .process(cluster.getSolrClient());
-
-    Replica leader
-        = cluster.getSolrClient().getZkStateReader().getLeaderRetry(testCollectionName, "shard1", DEFAULT_TIMEOUT);
-
-    long coreStartTime = getCoreStatus(leader).getCoreStartTime().getTime();
-    CollectionAdminRequest.reloadCollection(testCollectionName).process(cluster.getSolrClient());
-
-    RetryUtil.retryUntil("Timed out waiting for core to reload", 30, 1000, TimeUnit.MILLISECONDS, () -> {
-      long restartTime = 0;
-      try {
-        restartTime = getCoreStatus(leader).getCoreStartTime().getTime();
-      } catch (Exception e) {
-        log.warn("Exception getting core start time: {}", e.getMessage());
-        return false;
-      }
-      return restartTime > coreStartTime;
-    });
-
-    final int initialStateVersion = getCollectionState(testCollectionName).getZNodeVersion();
-
-    cluster.expireZkSession(cluster.getReplicaJetty(leader));
-
-    waitForState("Timed out waiting for core to re-register as ACTIVE after session expiry", testCollectionName, (n, c) -> {
-      log.info("Collection state: {}", c.toString());
-      Replica expiredReplica = c.getReplica(leader.getName());
-      return expiredReplica.getState() == Replica.State.ACTIVE && c.getZNodeVersion() > initialStateVersion;
-    });
-
-    log.info("testReloadedLeaderStateAfterZkSessionLoss succeeded ... shutting down now!");
-  }
-}
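
(Not part of the commit: the reload-detection idiom the deleted test relied on,
shown as a standalone sketch. getCoreStatus(...) is the SolrCloudTestCase helper
used above; the collection name is illustrative.)

    // Sketch only: a reload shows up as the leader core's start time moving
    // forward; retry because the core can be briefly unavailable mid-reload.
    final long before = getCoreStatus(leader).getCoreStartTime().getTime();
    CollectionAdminRequest.reloadCollection("c8n_1x1").process(cluster.getSolrClient());
    RetryUtil.retryUntil("Timed out waiting for core to reload", 30, 1000, TimeUnit.MILLISECONDS, () -> {
      try {
        return getCoreStatus(leader).getCoreStartTime().getTime() > before;
      } catch (Exception e) {
        return false; // keep retrying until the core answers again
      }
    });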


[22/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to it's own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/DeleteSnapshotCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteSnapshotCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteSnapshotCmd.java
deleted file mode 100644
index 765f4b9..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteSnapshotCmd.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.Replica.State;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the functionality of deleting a collection level snapshot.
- */
-public class DeleteSnapshotCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public DeleteSnapshotCmd (OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName =  message.getStr(COLLECTION_PROP);
-    String commitName =  message.getStr(CoreAdminParams.COMMIT_NAME);
-    String asyncId = message.getStr(ASYNC);
-    Map<String, String> requestMap = new HashMap<>();
-    NamedList shardRequestResults = new NamedList();
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    SolrZkClient zkClient = ocmh.overseer.getZkController().getZkClient();
-
-    Optional<CollectionSnapshotMetaData> meta = SolrSnapshotManager.getCollectionLevelSnapshot(zkClient, collectionName, commitName);
-    if (!meta.isPresent()) { // Snapshot not found. Nothing to do.
-      return;
-    }
-
-    log.info("Deleting a snapshot for collection={} with commitName={}", collectionName, commitName);
-
-    Set<String> existingCores = new HashSet<>();
-    for (Slice s : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
-      for (Replica r : s.getReplicas()) {
-        existingCores.add(r.getCoreName());
-      }
-    }
-
-    Set<String> coresWithSnapshot = new HashSet<>();
-    for (CoreSnapshotMetaData m : meta.get().getReplicaSnapshots()) {
-      if (existingCores.contains(m.getCoreName())) {
-        coresWithSnapshot.add(m.getCoreName());
-      }
-    }
-
-    log.info("Existing cores with snapshot for collection={} are {}", collectionName, existingCores);
-    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
-      for (Replica replica : slice.getReplicas()) {
-        if (replica.getState() == State.DOWN) {
-          continue; // Since replica is down - no point sending a request.
-        }
-
-        // Note - when a snapshot is found in the in_progress state, it is the result of an overseer
-        // failure while handling the snapshot creation. Since we don't know the exact set of
-        // replicas to contact at this point, we try on all replicas.
-        if (meta.get().getStatus() == SnapshotStatus.InProgress || coresWithSnapshot.contains(replica.getCoreName())) {
-          String coreName = replica.getStr(CORE_NAME_PROP);
-
-          ModifiableSolrParams params = new ModifiableSolrParams();
-          params.set(CoreAdminParams.ACTION, CoreAdminAction.DELETESNAPSHOT.toString());
-          params.set(NAME, slice.getName());
-          params.set(CORE_NAME_PROP, coreName);
-          params.set(CoreAdminParams.COMMIT_NAME, commitName);
-
-          log.info("Sending deletesnapshot request to core={} with commitName={}", coreName, commitName);
-          ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
-        }
-      }
-    }
-
-    ocmh.processResponses(shardRequestResults, shardHandler, false, null, asyncId, requestMap);
-    NamedList success = (NamedList) shardRequestResults.get("success");
-    List<CoreSnapshotMetaData> replicas = new ArrayList<>();
-    if (success != null) {
-      for (int i = 0; i < success.size(); i++) {
-        NamedList resp = (NamedList)success.getVal(i);
-        // Unfortunately async processing logic doesn't provide the "core" name automatically.
-        String coreName = (String)resp.get("core");
-        coresWithSnapshot.remove(coreName);
-      }
-    }
-
-    if (!coresWithSnapshot.isEmpty()) { // One or more failures.
-      log.warn("Failed to delete a snapshot for collection {} with commitName = {}. Snapshot could not be deleted for following cores {}",
-          collectionName, commitName, coresWithSnapshot);
-
-      List<CoreSnapshotMetaData> replicasWithSnapshot = new ArrayList<>();
-      for (CoreSnapshotMetaData m : meta.get().getReplicaSnapshots()) {
-        if (coresWithSnapshot.contains(m.getCoreName())) {
-          replicasWithSnapshot.add(m);
-        }
-      }
-
-      // Update the ZK meta-data to include only cores with the snapshot. This will enable users to figure out
-      // which cores still contain the named snapshot.
-      CollectionSnapshotMetaData newResult = new CollectionSnapshotMetaData(meta.get().getName(), SnapshotStatus.Failed,
-          meta.get().getCreationDate(), replicasWithSnapshot);
-      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, newResult);
-      log.info("Saved snapshot information for collection={} with commitName={} in Zookeeper as follows", collectionName, commitName,
-          Utils.toJSON(newResult));
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to delete snapshot on cores " + coresWithSnapshot);
-
-    } else {
-      // Delete the ZK path so that we eliminate the references of this snapshot from collection level meta-data.
-      SolrSnapshotManager.deleteCollectionLevelSnapshot(zkClient, collectionName, commitName);
-      log.info("Deleted Zookeeper snapshot metdata for collection={} with commitName={}", collectionName, commitName);
-      log.info("Successfully deleted snapshot for collection={} with commitName={}", collectionName, commitName);
-    }
-  }
-}
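
(Not part of the commit: callers normally reach this command through the
Collections API rather than by constructing it directly. A hedged SolrJ
sketch -- the DeleteSnapshot request class and its constructor are assumed
from CollectionAdminRequest; verify against this branch before relying on it.)

    // Sketch only: client-side trigger for the DELETESNAPSHOT command above.
    // "collection1" / "mySnapshot" are illustrative names.
    CollectionAdminRequest.DeleteSnapshot req =
        new CollectionAdminRequest.DeleteSnapshot("collection1", "mySnapshot");
    req.process(solrClient); // any SolrClient pointed at the cluster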

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java b/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
index 2faf6e9..953023f 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
@@ -28,6 +28,7 @@ import java.util.Random;
 import java.util.Set;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cloud.overseer.CollectionMutator;
 import org.apache.solr.cloud.overseer.SliceMutator;
@@ -39,8 +40,8 @@ import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARD_UNIQUE;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARD_UNIQUE;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
 
 // Class to encapsulate processing replica properties that have at most one replica hosting a property per slice.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/LeaderRecoveryWatcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/LeaderRecoveryWatcher.java b/solr/core/src/java/org/apache/solr/cloud/LeaderRecoveryWatcher.java
deleted file mode 100644
index 1eb4873..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/LeaderRecoveryWatcher.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.Set;
-
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.cloud.CollectionStateWatcher;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-
-/**
- * We use this watcher to wait for any eligible replica in a shard to become active so that it can become a leader.
- */
-public class LeaderRecoveryWatcher implements CollectionStateWatcher {
-  String collectionId;
-  String shardId;
-  String replicaId;
-  String targetCore;
-  SolrCloseableLatch latch;
-
-  /**
-   * Watch for recovery of a replica
-   *
-   * @param collectionId   collection name
-   * @param shardId        shard id
-   * @param replicaId      source replica name (coreNodeName)
-   * @param targetCore     specific target core name - if null then any active replica will do
-   * @param latch countdown when recovered
-   */
-  LeaderRecoveryWatcher(String collectionId, String shardId, String replicaId, String targetCore, SolrCloseableLatch latch) {
-    this.collectionId = collectionId;
-    this.shardId = shardId;
-    this.replicaId = replicaId;
-    this.targetCore = targetCore;
-    this.latch = latch;
-  }
-
-  @Override
-  public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-    if (collectionState == null) { // collection has been deleted - don't wait
-      latch.countDown();
-      return true;
-    }
-    Slice slice = collectionState.getSlice(shardId);
-    if (slice == null) { // shard has been removed - don't wait
-      latch.countDown();
-      return true;
-    }
-    for (Replica replica : slice.getReplicas()) {
-      // check if another replica exists - doesn't have to be the one we're moving
-      // as long as it's active and can become a leader, in which case we don't have to wait
-      // for recovery of specifically the one that we've just added
-      if (!replica.getName().equals(replicaId)) {
-        if (replica.getType().equals(Replica.Type.PULL)) { // not eligible for leader election
-          continue;
-        }
-        // check its state
-        String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-        if (targetCore != null && !targetCore.equals(coreName)) {
-          continue;
-        }
-        if (replica.isActive(liveNodes)) { // recovered - stop waiting
-          latch.countDown();
-          return true;
-        }
-      }
-    }
-    // set the watch again to wait for the new replica to recover
-    return false;
-  }
-}
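
(Not part of the commit: the CollectionStateWatcher contract implemented above
is simply "return true to drop the watch, false to be called again". A minimal
sketch with illustrative collection and shard names.)

    // Sketch only: wait until any replica of shard1 becomes active.
    CollectionStateWatcher anyActive = (liveNodes, state) -> {
      if (state == null) return true;           // collection deleted - stop
      Slice s = state.getSlice("shard1");
      if (s == null) return true;               // shard removed - stop
      for (Replica r : s.getReplicas()) {
        if (r.isActive(liveNodes)) return true; // recovered - stop watching
      }
      return false;                             // keep the watch registered
    };
    zkStateReader.registerCollectionStateWatcher("collection1", anyActive);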

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java b/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java
deleted file mode 100644
index 02fdb5c..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CompositeIdRouter;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.RoutingRule;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardHandlerFactory;
-import org.apache.solr.update.SolrIndexSplitter;
-import org.apache.solr.util.TimeOut;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.util.Utils.makeMap;
-
-public class MigrateCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-
-  public MigrateCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    String sourceCollectionName = message.getStr("collection");
-    String splitKey = message.getStr("split.key");
-    String targetCollectionName = message.getStr("target.collection");
-    int timeout = message.getInt("forward.timeout", 10 * 60) * 1000;
-
-    DocCollection sourceCollection = clusterState.getCollection(sourceCollectionName);
-    if (sourceCollection == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown source collection: " + sourceCollectionName);
-    }
-    DocCollection targetCollection = clusterState.getCollection(targetCollectionName);
-    if (targetCollection == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown target collection: " + sourceCollectionName);
-    }
-    if (!(sourceCollection.getRouter() instanceof CompositeIdRouter)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source collection must use a compositeId router");
-    }
-    if (!(targetCollection.getRouter() instanceof CompositeIdRouter)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target collection must use a compositeId router");
-    }
-    CompositeIdRouter sourceRouter = (CompositeIdRouter) sourceCollection.getRouter();
-    CompositeIdRouter targetRouter = (CompositeIdRouter) targetCollection.getRouter();
-    Collection<Slice> sourceSlices = sourceRouter.getSearchSlicesSingle(splitKey, null, sourceCollection);
-    if (sourceSlices.isEmpty()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "No active slices available in source collection: " + sourceCollection + "for given split.key: " + splitKey);
-    }
-    Collection<Slice> targetSlices = targetRouter.getSearchSlicesSingle(splitKey, null, targetCollection);
-    if (targetSlices.isEmpty()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "No active slices available in target collection: " + targetCollection + "for given split.key: " + splitKey);
-    }
-
-    String asyncId = null;
-    if (message.containsKey(ASYNC) && message.get(ASYNC) != null)
-      asyncId = message.getStr(ASYNC);
-
-    for (Slice sourceSlice : sourceSlices) {
-      for (Slice targetSlice : targetSlices) {
-        log.info("Migrating source shard: {} to target shard: {} for split.key = " + splitKey, sourceSlice, targetSlice);
-        migrateKey(clusterState, sourceCollection, sourceSlice, targetCollection, targetSlice, splitKey,
-            timeout, results, asyncId, message);
-      }
-    }
-  }
-
-  private void migrateKey(ClusterState clusterState, DocCollection sourceCollection, Slice sourceSlice,
-                          DocCollection targetCollection, Slice targetSlice,
-                          String splitKey, int timeout,
-                          NamedList results, String asyncId, ZkNodeProps message) throws Exception {
-    String tempSourceCollectionName = "split_" + sourceSlice.getName() + "_temp_" + targetSlice.getName();
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    if (clusterState.hasCollection(tempSourceCollectionName)) {
-      log.info("Deleting temporary collection: " + tempSourceCollectionName);
-      Map<String, Object> props = makeMap(
-          Overseer.QUEUE_OPERATION, DELETE.toLower(),
-          NAME, tempSourceCollectionName);
-
-      try {
-        ocmh.commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
-        clusterState = zkStateReader.getClusterState();
-      } catch (Exception e) {
-        log.warn("Unable to clean up existing temporary collection: " + tempSourceCollectionName, e);
-      }
-    }
-
-    CompositeIdRouter sourceRouter = (CompositeIdRouter) sourceCollection.getRouter();
-    DocRouter.Range keyHashRange = sourceRouter.keyHashRange(splitKey);
-
-    ShardHandlerFactory shardHandlerFactory = ocmh.shardHandlerFactory;
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-
-    log.info("Hash range for split.key: {} is: {}", splitKey, keyHashRange);
-    // intersect source range, keyHashRange and target range
-    // this is the range that has to be split from source and transferred to target
-    DocRouter.Range splitRange = ocmh.intersect(targetSlice.getRange(), ocmh.intersect(sourceSlice.getRange(), keyHashRange));
-    if (splitRange == null) {
-      log.info("No common hashes between source shard: {} and target shard: {}", sourceSlice.getName(), targetSlice.getName());
-      return;
-    }
-    log.info("Common hash range between source shard: {} and target shard: {} = " + splitRange, sourceSlice.getName(), targetSlice.getName());
-
-    Replica targetLeader = zkStateReader.getLeaderRetry(targetCollection.getName(), targetSlice.getName(), 10000);
-    // For tracking async calls.
-    Map<String, String> requestMap = new HashMap<>();
-
-    log.info("Asking target leader node: " + targetLeader.getNodeName() + " core: "
-        + targetLeader.getStr("core") + " to buffer updates");
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTBUFFERUPDATES.toString());
-    params.set(CoreAdminParams.NAME, targetLeader.getStr("core"));
-
-    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to request node to buffer updates", asyncId, requestMap);
-
-    ZkNodeProps m = new ZkNodeProps(
-        Overseer.QUEUE_OPERATION, OverseerAction.ADDROUTINGRULE.toLower(),
-        COLLECTION_PROP, sourceCollection.getName(),
-        SHARD_ID_PROP, sourceSlice.getName(),
-        "routeKey", SolrIndexSplitter.getRouteKey(splitKey) + "!",
-        "range", splitRange.toString(),
-        "targetCollection", targetCollection.getName(),
-        "expireAt", RoutingRule.makeExpiryAt(timeout));
-    log.info("Adding routing rule: " + m);
-    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-
-    // wait for a while until we see the new rule
-    log.info("Waiting to see routing rule updated in clusterstate");
-    TimeOut waitUntil = new TimeOut(60, TimeUnit.SECONDS, timeSource);
-    boolean added = false;
-    while (!waitUntil.hasTimedOut()) {
-      waitUntil.sleep(100);
-      sourceCollection = zkStateReader.getClusterState().getCollection(sourceCollection.getName());
-      sourceSlice = sourceCollection.getSlice(sourceSlice.getName());
-      Map<String, RoutingRule> rules = sourceSlice.getRoutingRules();
-      if (rules != null) {
-        RoutingRule rule = rules.get(SolrIndexSplitter.getRouteKey(splitKey) + "!");
-        if (rule != null && rule.getRouteRanges().contains(splitRange)) {
-          added = true;
-          break;
-        }
-      }
-    }
-    if (!added) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not add routing rule: " + m);
-    }
-
-    log.info("Routing rule added successfully");
-
-    // Create temp core on source shard
-    Replica sourceLeader = zkStateReader.getLeaderRetry(sourceCollection.getName(), sourceSlice.getName(), 10000);
-
-    // create a temporary collection with just one node on the shard leader
-    String configName = zkStateReader.readConfigName(sourceCollection.getName());
-    Map<String, Object> props = makeMap(
-        Overseer.QUEUE_OPERATION, CREATE.toLower(),
-        NAME, tempSourceCollectionName,
-        NRT_REPLICAS, 1,
-        NUM_SLICES, 1,
-        COLL_CONF, configName,
-        CREATE_NODE_SET, sourceLeader.getNodeName());
-    if (asyncId != null) {
-      String internalAsyncId = asyncId + Math.abs(System.nanoTime());
-      props.put(ASYNC, internalAsyncId);
-    }
-
-    log.info("Creating temporary collection: " + props);
-    ocmh.commandMap.get(CREATE).call(clusterState, new ZkNodeProps(props), results);
-    // refresh cluster state
-    clusterState = zkStateReader.getClusterState();
-    Slice tempSourceSlice = clusterState.getCollection(tempSourceCollectionName).getSlices().iterator().next();
-    Replica tempSourceLeader = zkStateReader.getLeaderRetry(tempSourceCollectionName, tempSourceSlice.getName(), 120000);
-
-    String tempCollectionReplica1 = tempSourceLeader.getCoreName();
-    String coreNodeName = ocmh.waitForCoreNodeName(tempSourceCollectionName,
-        sourceLeader.getNodeName(), tempCollectionReplica1);
-    // wait for the replicas to be seen as active on temp source leader
-    log.info("Asking source leader to wait for: " + tempCollectionReplica1 + " to be alive on: " + sourceLeader.getNodeName());
-    CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
-    cmd.setCoreName(tempCollectionReplica1);
-    cmd.setNodeName(sourceLeader.getNodeName());
-    cmd.setCoreNodeName(coreNodeName);
-    cmd.setState(Replica.State.ACTIVE);
-    cmd.setCheckLive(true);
-    cmd.setOnlyIfLeader(true);
-    // we don't want this to happen asynchronously
-    ocmh.sendShardRequest(tempSourceLeader.getNodeName(), new ModifiableSolrParams(cmd.getParams()), shardHandler, null, null);
-
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create temp collection leader" +
-        " or timed out waiting for it to come up", asyncId, requestMap);
-
-    log.info("Asking source leader to split index");
-    params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
-    params.set(CoreAdminParams.CORE, sourceLeader.getStr("core"));
-    params.add(CoreAdminParams.TARGET_CORE, tempSourceLeader.getStr("core"));
-    params.set(CoreAdminParams.RANGES, splitRange.toString());
-    params.set("split.key", splitKey);
-
-    String tempNodeName = sourceLeader.getNodeName();
-
-    ocmh.sendShardRequest(tempNodeName, params, shardHandler, asyncId, requestMap);
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to invoke SPLIT core admin command", asyncId, requestMap);
-
-    log.info("Creating a replica of temporary collection: {} on the target leader node: {}",
-        tempSourceCollectionName, targetLeader.getNodeName());
-    String tempCollectionReplica2 = Assign.buildSolrCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(),
-        zkStateReader.getClusterState().getCollection(tempSourceCollectionName), tempSourceSlice.getName(), Replica.Type.NRT);
-    props = new HashMap<>();
-    props.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
-    props.put(COLLECTION_PROP, tempSourceCollectionName);
-    props.put(SHARD_ID_PROP, tempSourceSlice.getName());
-    props.put("node", targetLeader.getNodeName());
-    props.put(CoreAdminParams.NAME, tempCollectionReplica2);
-    // copy over property params:
-    for (String key : message.keySet()) {
-      if (key.startsWith(COLL_PROP_PREFIX)) {
-        props.put(key, message.getStr(key));
-      }
-    }
-    // add async param
-    if (asyncId != null) {
-      props.put(ASYNC, asyncId);
-    }
-    ((AddReplicaCmd)ocmh.commandMap.get(ADDREPLICA)).addReplica(clusterState, new ZkNodeProps(props), results, null);
-
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create replica of " +
-        "temporary collection in target leader node.", asyncId, requestMap);
-
-    coreNodeName = ocmh.waitForCoreNodeName(tempSourceCollectionName,
-        targetLeader.getNodeName(), tempCollectionReplica2);
-    // wait for the replicas to be seen as active on temp source leader
-    log.info("Asking temp source leader to wait for: " + tempCollectionReplica2 + " to be alive on: " + targetLeader.getNodeName());
-    cmd = new CoreAdminRequest.WaitForState();
-    cmd.setCoreName(tempSourceLeader.getStr("core"));
-    cmd.setNodeName(targetLeader.getNodeName());
-    cmd.setCoreNodeName(coreNodeName);
-    cmd.setState(Replica.State.ACTIVE);
-    cmd.setCheckLive(true);
-    cmd.setOnlyIfLeader(true);
-    params = new ModifiableSolrParams(cmd.getParams());
-
-    ocmh.sendShardRequest(tempSourceLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create temp collection" +
-        " replica or timed out waiting for them to come up", asyncId, requestMap);
-
-    log.info("Successfully created replica of temp source collection on target leader node");
-
-    log.info("Requesting merge of temp source collection replica to target leader");
-    params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.MERGEINDEXES.toString());
-    params.set(CoreAdminParams.CORE, targetLeader.getStr("core"));
-    params.set(CoreAdminParams.SRC_CORE, tempCollectionReplica2);
-
-    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-    String msg = "MIGRATE failed to merge " + tempCollectionReplica2 + " to "
-        + targetLeader.getStr("core") + " on node: " + targetLeader.getNodeName();
-    ocmh.processResponses(results, shardHandler, true, msg, asyncId, requestMap);
-
-    log.info("Asking target leader to apply buffered updates");
-    params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
-    params.set(CoreAdminParams.NAME, targetLeader.getStr("core"));
-
-    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to request node to apply buffered updates",
-        asyncId, requestMap);
-
-    try {
-      log.info("Deleting temporary collection: " + tempSourceCollectionName);
-      props = makeMap(
-          Overseer.QUEUE_OPERATION, DELETE.toLower(),
-          NAME, tempSourceCollectionName);
-      ocmh.commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
-    } catch (Exception e) {
-      log.error("Unable to delete temporary collection: " + tempSourceCollectionName
-          + ". Please remove it manually", e);
-    }
-  }
-}
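
(Not part of the commit: the splitRange computed above is the pairwise
intersection of three hash ranges. A hypothetical standalone helper showing
the arithmetic; the real intersect(...) is a method on the message handler.)

    // Sketch only: DocRouter.Range exposes public min/max ints.
    static DocRouter.Range intersect(DocRouter.Range a, DocRouter.Range b) {
      if (a == null || b == null) return null;
      int min = Math.max(a.min, b.min);
      int max = Math.min(a.max, b.max);
      return min <= max ? new DocRouter.Range(min, max) : null; // disjoint
    }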

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java
deleted file mode 100644
index 44493ec..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Locale;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.util.TimeOut;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.*;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonAdminParams.IN_PLACE_MOVE;
-import static org.apache.solr.common.params.CommonAdminParams.TIMEOUT;
-import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
-
-public class MoveReplicaCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-
-  public MoveReplicaCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    moveReplica(ocmh.zkStateReader.getClusterState(), message, results);
-  }
-
-  private void moveReplica(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    log.debug("moveReplica() : {}", Utils.toJSONString(message));
-    ocmh.checkRequired(message, COLLECTION_PROP, CollectionParams.TARGET_NODE);
-    String collection = message.getStr(COLLECTION_PROP);
-    String targetNode = message.getStr(CollectionParams.TARGET_NODE);
-    boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
-    boolean inPlaceMove = message.getBool(IN_PLACE_MOVE, true);
-    int timeout = message.getInt(TIMEOUT, 10 * 60); // 10 minutes
-
-    String async = message.getStr(ASYNC);
-
-    DocCollection coll = clusterState.getCollection(collection);
-    if (coll == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
-    }
-    if (!clusterState.getLiveNodes().contains(targetNode)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target node: " + targetNode + " not in live nodes: " + clusterState.getLiveNodes());
-    }
-    Replica replica = null;
-    if (message.containsKey(REPLICA_PROP)) {
-      String replicaName = message.getStr(REPLICA_PROP);
-      replica = coll.getReplica(replicaName);
-      if (replica == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Collection: " + collection + " replica: " + replicaName + " does not exist");
-      }
-    } else {
-      String sourceNode = message.getStr(CollectionParams.SOURCE_NODE, message.getStr(CollectionParams.FROM_NODE));
-      if (sourceNode == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'" + CollectionParams.SOURCE_NODE +
-            " or '" + CollectionParams.FROM_NODE + "' is a required param");
-      }
-      String shardId = message.getStr(SHARD_ID_PROP);
-      if (shardId == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'" + SHARD_ID_PROP + "' is a required param");
-      }
-      Slice slice = clusterState.getCollection(collection).getSlice(shardId);
-      List<Replica> sliceReplicas = new ArrayList<>(slice.getReplicas());
-      Collections.shuffle(sliceReplicas, RANDOM);
-      // this picks up a single random replica from the sourceNode
-      for (Replica r : slice.getReplicas()) {
-        if (r.getNodeName().equals(sourceNode)) {
-          replica = r;
-        }
-      }
-      if (replica == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Collection: " + collection + " node: " + sourceNode + " does not have any replica belonging to shard: " + shardId);
-      }
-    }
-
-    log.info("Replica will be moved to node {}: {}", targetNode, replica);
-    Slice slice = null;
-    for (Slice s : coll.getSlices()) {
-      if (s.getReplicas().contains(replica)) {
-        slice = s;
-      }
-    }
-    assert slice != null;
-    Object dataDir = replica.get("dataDir");
-    boolean isSharedFS = replica.getBool(ZkStateReader.SHARED_STORAGE_PROP, false) && dataDir != null;
-
-    if (isSharedFS && inPlaceMove) {
-      log.debug("-- moveHdfsReplica");
-      moveHdfsReplica(clusterState, results, dataDir.toString(), targetNode, async, coll, replica, slice, timeout, waitForFinalState);
-    } else {
-      log.debug("-- moveNormalReplica (inPlaceMove=" + inPlaceMove + ", isSharedFS=" + isSharedFS);
-      moveNormalReplica(clusterState, results, targetNode, async, coll, replica, slice, timeout, waitForFinalState);
-    }
-  }
-
-  private void moveHdfsReplica(ClusterState clusterState, NamedList results, String dataDir, String targetNode, String async,
-                                 DocCollection coll, Replica replica, Slice slice, int timeout, boolean waitForFinalState) throws Exception {
-    String skipCreateReplicaInClusterState = "true";
-    if (clusterState.getLiveNodes().contains(replica.getNodeName())) {
-      skipCreateReplicaInClusterState = "false";
-      ZkNodeProps removeReplicasProps = new ZkNodeProps(
-          COLLECTION_PROP, coll.getName(),
-          SHARD_ID_PROP, slice.getName(),
-          REPLICA_PROP, replica.getName()
-      );
-      removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_DATA_DIR, false);
-      removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_INDEX, false);
-      if (async != null) removeReplicasProps.getProperties().put(ASYNC, async);
-      NamedList deleteResult = new NamedList();
-      ocmh.deleteReplica(clusterState, removeReplicasProps, deleteResult, null);
-      if (deleteResult.get("failure") != null) {
-        String errorString = String.format(Locale.ROOT, "Failed to cleanup replica collection=%s shard=%s name=%s, failure=%s",
-            coll.getName(), slice.getName(), replica.getName(), deleteResult.get("failure"));
-        log.warn(errorString);
-        results.add("failure", errorString);
-        return;
-      }
-
-      TimeOut timeOut = new TimeOut(20L, TimeUnit.SECONDS, timeSource);
-      while (!timeOut.hasTimedOut()) {
-        coll = ocmh.zkStateReader.getClusterState().getCollection(coll.getName());
-        if (coll.getReplica(replica.getName()) != null) {
-          timeOut.sleep(100);
-        } else {
-          break;
-        }
-      }
-      if (timeOut.hasTimedOut()) {
-        results.add("failure", "Still see deleted replica in clusterstate!");
-        return;
-      }
-
-    }
-
-    String ulogDir = replica.getStr(CoreAdminParams.ULOG_DIR);
-    ZkNodeProps addReplicasProps = new ZkNodeProps(
-        COLLECTION_PROP, coll.getName(),
-        SHARD_ID_PROP, slice.getName(),
-        CoreAdminParams.NODE, targetNode,
-        CoreAdminParams.CORE_NODE_NAME, replica.getName(),
-        CoreAdminParams.NAME, replica.getCoreName(),
-        WAIT_FOR_FINAL_STATE, String.valueOf(waitForFinalState),
-        SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, skipCreateReplicaInClusterState,
-        CoreAdminParams.ULOG_DIR, ulogDir.substring(0, ulogDir.lastIndexOf(UpdateLog.TLOG_NAME)),
-        CoreAdminParams.DATA_DIR, dataDir);
-    if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
-    NamedList addResult = new NamedList();
-    try {
-      ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, addResult, null);
-    } catch (Exception e) {
-      // fatal error - try rolling back
-      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-          " on node=%s, failure=%s", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
-      results.add("failure", errorString);
-      log.warn("Error adding replica " + addReplicasProps + " - trying to roll back...", e);
-      addReplicasProps = addReplicasProps.plus(CoreAdminParams.NODE, replica.getNodeName());
-      NamedList rollback = new NamedList();
-      ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, rollback, null);
-      if (rollback.get("failure") != null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
-            + ", collection may be inconsistent: " + rollback.get("failure"));
-      }
-      return;
-    }
-    if (addResult.get("failure") != null) {
-      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-          " on node=%s, failure=%s", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
-      log.warn(errorString);
-      results.add("failure", errorString);
-      log.debug("--- trying to roll back...");
-      // try to roll back
-      addReplicasProps = addReplicasProps.plus(CoreAdminParams.NODE, replica.getNodeName());
-      NamedList rollback = new NamedList();
-      try {
-        ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, rollback, null);
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
-            + ", collection may be inconsistent!", e);
-      }
-      if (rollback.get("failure") != null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
-            + ", collection may be inconsistent! Failure: " + rollback.get("failure"));
-      }
-      return;
-    } else {
-      String successString = String.format(Locale.ROOT, "MOVEREPLICA action completed successfully, moved replica=%s at node=%s " +
-          "to replica=%s at node=%s", replica.getCoreName(), replica.getNodeName(), replica.getCoreName(), targetNode);
-      results.add("success", successString);
-    }
-  }
-
-  private void moveNormalReplica(ClusterState clusterState, NamedList results, String targetNode, String async,
-                                 DocCollection coll, Replica replica, Slice slice, int timeout, boolean waitForFinalState) throws Exception {
-    String newCoreName = Assign.buildSolrCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(), coll, slice.getName(), replica.getType());
-    ZkNodeProps addReplicasProps = new ZkNodeProps(
-        COLLECTION_PROP, coll.getName(),
-        SHARD_ID_PROP, slice.getName(),
-        CoreAdminParams.NODE, targetNode,
-        CoreAdminParams.NAME, newCoreName);
-    if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
-    NamedList addResult = new NamedList();
-    SolrCloseableLatch countDownLatch = new SolrCloseableLatch(1, ocmh);
-    ActiveReplicaWatcher watcher = null;
-    ZkNodeProps props = ocmh.addReplica(clusterState, addReplicasProps, addResult, null);
-    log.debug("props " + props);
-    if (replica.equals(slice.getLeader()) || waitForFinalState) {
-      watcher = new ActiveReplicaWatcher(coll.getName(), null, Collections.singletonList(newCoreName), countDownLatch);
-      log.debug("-- registered watcher " + watcher);
-      ocmh.zkStateReader.registerCollectionStateWatcher(coll.getName(), watcher);
-    }
-    if (addResult.get("failure") != null) {
-      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-          " on node=%s, failure=", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
-      log.warn(errorString);
-      results.add("failure", errorString);
-      if (watcher != null) { // unregister
-        ocmh.zkStateReader.removeCollectionStateWatcher(coll.getName(), watcher);
-      }
-      return;
-    }
-    // wait for the other replica to be active if the source replica was a leader
-    if (watcher != null) {
-      try {
-        log.debug("Waiting for leader's replica to recover.");
-        if (!countDownLatch.await(timeout, TimeUnit.SECONDS)) {
-          String errorString = String.format(Locale.ROOT, "Timed out waiting for leader's replica to recover, collection=%s shard=%s" +
-              " on node=%s", coll.getName(), slice.getName(), targetNode);
-          log.warn(errorString);
-          results.add("failure", errorString);
-          return;
-        } else {
-          log.debug("Replica " + watcher.getActiveReplicas() + " is active - deleting the source...");
-        }
-      } finally {
-        ocmh.zkStateReader.removeCollectionStateWatcher(coll.getName(), watcher);
-      }
-    }
-
-    ZkNodeProps removeReplicasProps = new ZkNodeProps(
-        COLLECTION_PROP, coll.getName(),
-        SHARD_ID_PROP, slice.getName(),
-        REPLICA_PROP, replica.getName());
-    if (async != null) removeReplicasProps.getProperties().put(ASYNC, async);
-    NamedList deleteResult = new NamedList();
-    ocmh.deleteReplica(clusterState, removeReplicasProps, deleteResult, null);
-    if (deleteResult.get("failure") != null) {
-      String errorString = String.format(Locale.ROOT, "Failed to cleanup replica collection=%s shard=%s name=%s, failure=%s",
-          coll.getName(), slice.getName(), replica.getName(), deleteResult.get("failure"));
-      log.warn(errorString);
-      results.add("failure", errorString);
-    } else {
-      String successString = String.format(Locale.ROOT, "MOVEREPLICA action completed successfully, moved replica=%s at node=%s " +
-          "to replica=%s at node=%s", replica.getCoreName(), replica.getNodeName(), newCoreName, targetNode);
-      results.add("success", successString);
-    }
-  }
-}
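
(Not part of the commit: note that the source-replica selection above shuffles
a copy of the replica list but then iterates the unshuffled original, so the
"random" pick is effectively "last match wins". A sketch of the presumably
intended selection.)

    // Sketch only: pick a genuinely random replica hosted on sourceNode.
    List<Replica> candidates = new ArrayList<>(slice.getReplicas());
    Collections.shuffle(candidates, RANDOM);
    Replica picked = null;
    for (Replica r : candidates) {
      if (r.getNodeName().equals(sourceNode)) {
        picked = r;
        break; // first hit in shuffled order
      }
    }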

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/Overseer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index ee5fb18..edf3838 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -29,6 +29,7 @@ import java.util.Set;
 import com.codahale.metrics.Timer;
 import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.cloud.autoscaling.OverseerTriggerThread;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cloud.overseer.CollectionMutator;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
index 570843a..e8d85ce 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
@@ -19,6 +19,7 @@ package org.apache.solr.cloud;
 import java.io.IOException;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.handler.component.ShardHandler;


[37/41] lucene-solr:jira/solr-11702: SOLR-11871: MoveReplicaSuggester should not suggest leader if other replicas are available

Posted by da...@apache.org.
SOLR-11871: MoveReplicaSuggester should not suggest leader if other replicas are available


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d0a5dbe8
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d0a5dbe8
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d0a5dbe8

Branch: refs/heads/jira/solr-11702
Commit: d0a5dbe8d592120891205d7136f6368c473d0022
Parents: 876ecd8
Author: Noble Paul <no...@apache.org>
Authored: Tue Jan 23 00:13:27 2018 +1100
Committer: Noble Paul <no...@apache.org>
Committed: Tue Jan 23 00:13:27 2018 +1100

----------------------------------------------------------------------
 solr/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d0a5dbe8/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 9d3cbcb..7b2f1d3 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -162,8 +162,8 @@ Other Changes
 * SOLR-11810: Upgrade Jetty to 9.4.8.v20171121 (Varun Thacker, Erick Erickson)
 
 * SOLR-11747: Pause triggers until actions finish executing and the cool down period expires. (shalin)
-
-* SOLR-11871: MoveReplicaSuggester should not suggest leader if other replicas are available (noble)
+* SOLR-11871: MoveReplicaSuggester should not suggest leader if other replicas are available
+  (noble)
 
 ==================  7.2.1 ==================
 

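The suggester behavior described above can be pictured with a small sketch. This is not the MoveReplicaSuggester code itself, only a minimal illustration with invented types (ReplicaView, MoveCandidatePicker are not Solr classes): when choosing which replica of a shard to move, prefer a non-leader so the move does not force an avoidable leader election.

import java.util.List;
import java.util.Optional;

// Invented stand-in for a replica's state; the real suggester works on Solr's autoscaling model.
class ReplicaView {
  final String name;
  final boolean leader;
  ReplicaView(String name, boolean leader) { this.name = name; this.leader = leader; }
}

class MoveCandidatePicker {
  // Prefer any non-leader replica; fall back to the leader only when it is the sole replica.
  static Optional<ReplicaView> pick(List<ReplicaView> replicas) {
    if (replicas.isEmpty()) {
      return Optional.empty();
    }
    return Optional.of(replicas.stream()
        .filter(r -> !r.leader)
        .findFirst()
        .orElse(replicas.get(0)));
  }
}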

[41/41] lucene-solr:jira/solr-11702: SOLR-11702: Fix precommit

Posted by da...@apache.org.
SOLR-11702: Fix precommit


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6f580a45
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6f580a45
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6f580a45

Branch: refs/heads/jira/solr-11702
Commit: 6f580a4545a8f44e7c37ac6ea47c5ceff90aec7d
Parents: 278442b
Author: Cao Manh Dat <da...@apache.org>
Authored: Tue Jan 23 17:30:09 2018 +0700
Committer: Cao Manh Dat <da...@apache.org>
Committed: Tue Jan 23 17:30:09 2018 +0700

----------------------------------------------------------------------
 solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java          | 1 -
 solr/core/src/test/org/apache/solr/cloud/LIRRollingUpdatesTest.java | 1 -
 2 files changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6f580a45/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java b/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java
index d776bf4..ee17841 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java
@@ -35,7 +35,6 @@ import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.CoreDescriptor;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6f580a45/solr/core/src/test/org/apache/solr/cloud/LIRRollingUpdatesTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/LIRRollingUpdatesTest.java b/solr/core/src/test/org/apache/solr/cloud/LIRRollingUpdatesTest.java
index f014de9..df8525b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LIRRollingUpdatesTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LIRRollingUpdatesTest.java
@@ -51,7 +51,6 @@ import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.util.TestInjection;
 import org.apache.solr.util.TimeOut;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;


[30/41] lucene-solr:jira/solr-11702: SOLR-11834: ref-guide: [subquery] doesn't need top level fl to repeat subq.fl

Posted by da...@apache.org.
SOLR-11834: ref-guide: [subquery] doesn't need top level fl to repeat subq.fl


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/42832f88
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/42832f88
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/42832f88

Branch: refs/heads/jira/solr-11702
Commit: 42832f8839785eb9abefe8eba65a236360eec5e1
Parents: 2900bb5
Author: Mikhail Khludnev <mk...@apache.org>
Authored: Wed Jan 17 22:51:27 2018 +0300
Committer: Mikhail Khludnev <mk...@apache.org>
Committed: Wed Jan 17 22:52:12 2018 +0300

----------------------------------------------------------------------
 solr/solr-ref-guide/src/transforming-result-documents.adoc | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/42832f88/solr/solr-ref-guide/src/transforming-result-documents.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/transforming-result-documents.adoc b/solr/solr-ref-guide/src/transforming-result-documents.adoc
index 09ae314..1a0104f 100644
--- a/solr/solr-ref-guide/src/transforming-result-documents.adoc
+++ b/solr/solr-ref-guide/src/transforming-result-documents.adoc
@@ -250,14 +250,12 @@ Here is how it looks like in various formats:
 
 ==== Subquery Result Fields
 
-To appear in subquery document list, a field should be specified both fl parameters, in main one fl (despite the main result documents have no this field) and in subquery's one e.g., `foo.fl`. Of course, you can use wildcard in any or both of these parameters. For example, if field title should appear in categories subquery, it can be done via one of these ways.
+To appear in the subquery document list, a field needs to be specified only in the subquery's `fl` parameter, e.g., `foo.fl` (it is not necessary to repeat it in the main query's `fl`). Of course, you can use a wildcard in this parameter. For example, if the field `title` should appear in the `categories` subquery, it can be requested in either of these ways.
 
 [source,plain]
 ----
-fl=...title,categories:[subquery]&categories.fl=title&categories.q=...
-fl=...title,categories:[subquery]&categories.fl=*&categories.q=...
-fl=...*,categories:[subquery]&categories.fl=*&categories.q=...
-fl=...*,categories:[subquery]&categories.fl=*&categories.q=...
+fl=...id,categories:[subquery]&categories.fl=title&categories.q=...
+fl=...id,categories:[subquery]&categories.fl=*&categories.q=...
 ----
 
 ==== Subquery Parameters Shift

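A minimal SolrJ sketch of the behavior documented above, assuming an illustrative products collection with a cat_ids join field (these names are not from the commit): title is requested only through categories.fl, not in the top-level fl, and still shows up in the subquery documents.

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class SubqueryFlSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
             new HttpSolrClient.Builder("http://localhost:8983/solr/products").build()) {
      SolrQuery q = new SolrQuery("*:*");
      q.setFields("id", "categories:[subquery]");            // top-level fl does not repeat title
      q.set("categories.q", "{!terms f=id v=$row.cat_ids}"); // assumed join on a cat_ids field
      q.set("categories.fl", "title");                       // title appears only here
      QueryResponse rsp = client.query(q);
      System.out.println(rsp.getResults());                  // each doc carries a categories list
    }
  }
}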

[11/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to it's own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
new file mode 100644
index 0000000..6d3ce4e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
@@ -0,0 +1,1017 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.BasicDistributedZkTest;
+import org.apache.solr.cloud.ChaosMonkey;
+import org.apache.solr.cloud.StoppableIndexingThread;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CollectionStateWatcher;
+import org.apache.solr.common.cloud.CompositeIdRouter;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.HashBasedRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.util.TestInjection;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+
+@Slow
+public class ShardSplitTest extends BasicDistributedZkTest {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  public static final String SHARD1_0 = SHARD1 + "_0";
+  public static final String SHARD1_1 = SHARD1 + "_1";
+
+  public ShardSplitTest() {
+    schemaString = "schema15.xml";      // we need a string id
+  }
+
+  @Override
+  public void distribSetUp() throws Exception {
+    super.distribSetUp();
+    useFactory(null);
+  }
+
+  @Test
+  public void test() throws Exception {
+
+    waitForThingsToLevelOut(15);
+
+    if (usually()) {
+      log.info("Using legacyCloud=false for cluster");
+      CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
+          .process(cloudClient);
+    }
+    incompleteOrOverlappingCustomRangeTest();
+    splitByUniqueKeyTest();
+    splitByRouteFieldTest();
+    splitByRouteKeyTest();
+
+    // todo can't call waitForThingsToLevelOut because it looks for jettys of all shards
+    // and the new sub-shards don't have any.
+    waitForRecoveriesToFinish(true);
+    //waitForThingsToLevelOut(15);
+  }
+
+  /*
+  Creates a collection with replicationFactor=1, splits a shard. Restarts the sub-shard leader node.
+  Add a replica. Ensure count matches in leader and replica.
+   */
+  public void testSplitStaticIndexReplication() throws Exception {
+    waitForThingsToLevelOut(15);
+
+    DocCollection defCol = cloudClient.getZkStateReader().getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+    Replica replica = defCol.getReplicas().get(0);
+    String nodeName = replica.getNodeName();
+
+    String collectionName = "testSplitStaticIndexReplication";
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
+    create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
+    create.setCreateNodeSet(nodeName); // we want to create the leader on a fixed node so that we know which one to restart later
+    create.process(cloudClient);
+    try (CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress(), true, cloudClient.getLbClient().getHttpClient())) {
+      client.setDefaultCollection(collectionName);
+      StoppableIndexingThread thread = new StoppableIndexingThread(controlClient, client, "i1", true);
+      try {
+        thread.start();
+        Thread.sleep(1000); // give the indexer some time to do its work
+        thread.safeStop();
+        thread.join();
+        client.commit();
+        controlClient.commit();
+
+        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(collectionName);
+        splitShard.setShardName(SHARD1);
+        String asyncId = splitShard.processAsync(client);
+        RequestStatusState state = CollectionAdminRequest.requestStatus(asyncId).waitFor(client, 120);
+        if (state == RequestStatusState.COMPLETED)  {
+          waitForRecoveriesToFinish(collectionName, true);
+          // let's wait to see parent shard become inactive
+          CountDownLatch latch = new CountDownLatch(1);
+          client.getZkStateReader().registerCollectionStateWatcher(collectionName, new CollectionStateWatcher() {
+            @Override
+            public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
+              Slice parent = collectionState.getSlice(SHARD1);
+              Slice slice10 = collectionState.getSlice(SHARD1_0);
+              Slice slice11 = collectionState.getSlice(SHARD1_1);
+              if (slice10 != null && slice11 != null &&
+                  parent.getState() == Slice.State.INACTIVE &&
+                  slice10.getState() == Slice.State.ACTIVE &&
+                  slice11.getState() == Slice.State.ACTIVE) {
+                latch.countDown();
+                return true; // removes the watch
+              }
+              return false;
+            }
+          });
+          latch.await(1, TimeUnit.MINUTES);
+          if (latch.getCount() != 0)  {
+            // sanity check
+            fail("Sub-shards did not become active even after waiting for 1 minute");
+          }
+
+          int liveNodeCount = client.getZkStateReader().getClusterState().getLiveNodes().size();
+
+          // restart the sub-shard leader node
+          boolean restarted = false;
+          for (JettySolrRunner jetty : jettys) {
+            int port = jetty.getBaseUrl().getPort();
+            if (replica.getStr(BASE_URL_PROP).contains(":" + port))  {
+              ChaosMonkey.kill(jetty);
+              ChaosMonkey.start(jetty);
+              restarted = true;
+              break;
+            }
+          }
+          if (!restarted) {
+            // sanity check
+            fail("We could not find a jetty to kill for replica: " + replica.getCoreUrl());
+          }
+
+          // add a new replica for the sub-shard
+          CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(collectionName, SHARD1_0);
+          // use the control client because there is less chance of it being the node that is restarted,
+          // which avoids test flakiness from NoHttpResponseExceptions
+          String control_collection = client.getZkStateReader().getClusterState().getCollection("control_collection").getReplicas().get(0).getStr(BASE_URL_PROP);
+          try (HttpSolrClient control = new HttpSolrClient.Builder(control_collection).withHttpClient(client.getLbClient().getHttpClient()).build())  {
+            state = addReplica.processAndWait(control, 30);
+          }
+          if (state == RequestStatusState.COMPLETED)  {
+            CountDownLatch newReplicaLatch = new CountDownLatch(1);
+            client.getZkStateReader().registerCollectionStateWatcher(collectionName, new CollectionStateWatcher() {
+              @Override
+              public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
+                if (liveNodes.size() != liveNodeCount)  {
+                  return false;
+                }
+                Slice slice = collectionState.getSlice(SHARD1_0);
+                if (slice.getReplicas().size() == 2)  {
+                  if (slice.getReplicas().stream().noneMatch(r -> r.getState() == Replica.State.RECOVERING)) {
+                    // we see replicas and none of them are recovering
+                    newReplicaLatch.countDown();
+                    return true;
+                  }
+                }
+                return false;
+              }
+            });
+            newReplicaLatch.await(30, TimeUnit.SECONDS);
+            // check consistency of sub-shard replicas explicitly because the checkShardConsistency
+            // method doesn't handle new shards/replicas well.
+            ClusterState clusterState = client.getZkStateReader().getClusterState();
+            DocCollection collection = clusterState.getCollection(collectionName);
+            int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
+            assertEquals("We should have checked consistency for exactly 2 replicas of shard1_0", 2, numReplicasChecked);
+          } else  {
+            fail("Adding a replica to sub-shard did not complete even after waiting for 30 seconds! Saw state = " + state.getKey());
+          }
+        } else {
+          fail("We expected shard split to succeed on a static index but it didn't. Found state = " + state.getKey());
+        }
+      } finally {
+        thread.safeStop();
+        thread.join();
+      }
+    }
+  }
+
+  private int assertConsistentReplicas(Slice shard) throws SolrServerException, IOException {
+    long numFound = Long.MIN_VALUE;
+    int count = 0;
+    for (Replica replica : shard.getReplicas()) {
+      try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getCoreUrl())
+          .withHttpClient(cloudClient.getLbClient().getHttpClient()).build()) {
+        QueryResponse response = client.query(new SolrQuery("q", "*:*", "distrib", "false"));
+        log.info("Found numFound={} on replica: {}", response.getResults().getNumFound(), replica.getCoreUrl());
+        if (numFound == Long.MIN_VALUE)  {
+          numFound = response.getResults().getNumFound();
+        } else  {
+          assertEquals("Shard " + shard.getName() + " replicas do not have same number of documents", numFound, response.getResults().getNumFound());
+        }
+      } // close the per-replica client so connections are not leaked
+      count++;
+    }
+    return count;
+  }
+
+  /**
+   * Used to test that we can split a shard when a previous split event
+   * left sub-shards in construction or recovery state.
+   *
+   * See SOLR-9439
+   */
+  @Test
+  public void testSplitAfterFailedSplit() throws Exception {
+    waitForThingsToLevelOut(15);
+
+    TestInjection.splitFailureBeforeReplicaCreation = "true:100"; // we definitely want split to fail
+    try {
+      try {
+        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+        splitShard.setShardName(SHARD1);
+        splitShard.process(cloudClient);
+        fail("Shard split was not supposed to succeed after failure injection!");
+      } catch (Exception e) {
+        // expected
+      }
+
+      // assert that sub-shards cores exist and sub-shard is in construction state
+      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      zkStateReader.forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+      ClusterState state = zkStateReader.getClusterState();
+      DocCollection collection = state.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+
+      Slice shard10 = collection.getSlice(SHARD1_0);
+      assertEquals(Slice.State.CONSTRUCTION, shard10.getState());
+      assertEquals(1, shard10.getReplicas().size());
+
+      Slice shard11 = collection.getSlice(SHARD1_1);
+      assertEquals(Slice.State.CONSTRUCTION, shard11.getState());
+      assertEquals(1, shard11.getReplicas().size());
+
+      // lets retry the split
+      TestInjection.reset(); // let the split succeed
+      try {
+        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+        splitShard.setShardName(SHARD1);
+        splitShard.process(cloudClient);
+        // Yay!
+      } catch (Exception e) {
+        log.error("Shard split failed", e);
+        fail("Shard split did not succeed after a previous failed split attempt left sub-shards in construction state");
+      }
+
+    } finally {
+      TestInjection.reset();
+    }
+  }
+
+  @Test
+  public void testSplitWithChaosMonkey() throws Exception {
+    waitForThingsToLevelOut(15);
+
+    List<StoppableIndexingThread> indexers = new ArrayList<>();
+    try {
+      for (int i = 0; i < 1; i++) {
+        StoppableIndexingThread thread = new StoppableIndexingThread(controlClient, cloudClient, String.valueOf(i), true);
+        indexers.add(thread);
+        thread.start();
+      }
+      Thread.sleep(1000); // give the indexers some time to do their work
+    } catch (Exception e) {
+      log.error("Error in test", e);
+    } finally {
+      for (StoppableIndexingThread indexer : indexers) {
+        indexer.safeStop();
+        indexer.join();
+      }
+    }
+
+    cloudClient.commit();
+    controlClient.commit();
+
+    AtomicBoolean stop = new AtomicBoolean();
+    AtomicBoolean killed = new AtomicBoolean(false);
+    Runnable monkey = new Runnable() {
+      @Override
+      public void run() {
+        ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+        zkStateReader.registerCollectionStateWatcher(AbstractDistribZkTestBase.DEFAULT_COLLECTION, new CollectionStateWatcher() {
+          @Override
+          public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
+            if (stop.get()) {
+              return true; // abort and remove the watch
+            }
+            Slice slice = collectionState.getSlice(SHARD1_0);
+            if (slice != null && slice.getReplicas().size() > 1) {
+              // ensure that only one watcher invocation thread can kill!
+              if (killed.compareAndSet(false, true))  {
+                log.info("Monkey thread found 2 replicas for {} {}", AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
+                CloudJettyRunner cjetty = shardToLeaderJetty.get(SHARD1);
+                try {
+                  Thread.sleep(1000 + random().nextInt(500));
+                  ChaosMonkey.kill(cjetty);
+                  stop.set(true);
+                  return true;
+                } catch (Exception e) {
+                  log.error("Monkey unable to kill jetty at port " + cjetty.jetty.getLocalPort(), e);
+                }
+              }
+            }
+            log.info("Monkey thread found only one replica for {} {}", AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
+            return false;
+          }
+        });
+      }
+    };
+
+    Thread monkeyThread = new Thread(monkey);
+    monkeyThread.start();
+    try {
+      CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+      splitShard.setShardName(SHARD1);
+      String asyncId = splitShard.processAsync(cloudClient);
+      RequestStatusState splitStatus = null;
+      try {
+        splitStatus = CollectionAdminRequest.requestStatus(asyncId).waitFor(cloudClient, 120);
+      } catch (Exception e) {
+        log.warn("Failed to get request status, maybe because the overseer node was shutdown by monkey", e);
+      }
+
+      // we don't care if the split failed because we are injecting faults and it is likely
+      // that the split has failed but in any case we want to assert that all docs that got
+      // indexed are available in SolrCloud and if the split succeeded then all replicas of the sub-shard
+      // must be consistent (i.e. have same numdocs)
+
+      log.info("Shard split request state is {}", splitStatus);
+      stop.set(true);
+      monkeyThread.join();
+      Set<String> addFails = new HashSet<>();
+      Set<String> deleteFails = new HashSet<>();
+      for (StoppableIndexingThread indexer : indexers) {
+        addFails.addAll(indexer.getAddFails());
+        deleteFails.addAll(indexer.getDeleteFails());
+      }
+
+      CloudJettyRunner cjetty = shardToLeaderJetty.get(SHARD1);
+      log.info("Starting shard1 leader jetty at port {}", cjetty.jetty.getLocalPort());
+      ChaosMonkey.start(cjetty.jetty);
+      cloudClient.getZkStateReader().forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+      log.info("Current collection state: {}", printClusterStateInfo(AbstractDistribZkTestBase.DEFAULT_COLLECTION));
+
+      boolean replicaCreationsFailed = false;
+      if (splitStatus == RequestStatusState.FAILED)  {
+        // either one or more replica creation failed (because it may have been created on the same parent shard leader node)
+        // or the split may have failed while trying to soft-commit *after* all replicas have been created
+        // the latter counts as a successful switch even if the API doesn't say so
+        // so we must find a way to distinguish between the two
+        // an easy way to do that is to look at the sub-shard replicas and check if the replica core actually exists
+        // instead of existing solely inside the cluster state
+        DocCollection collectionState = cloudClient.getZkStateReader().getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+        Slice slice10 = collectionState.getSlice(SHARD1_0);
+        Slice slice11 = collectionState.getSlice(SHARD1_1);
+        if (slice10 != null && slice11 != null) {
+          for (Replica replica : slice10) {
+            if (!doesReplicaCoreExist(replica)) {
+              replicaCreationsFailed = true;
+              break;
+            }
+          }
+          for (Replica replica : slice11) {
+            if (!doesReplicaCoreExist(replica)) {
+              replicaCreationsFailed = true;
+              break;
+            }
+          }
+        }
+      }
+
+      // true if sub-shard states switch to 'active' eventually
+      AtomicBoolean areSubShardsActive = new AtomicBoolean(false);
+
+      if (!replicaCreationsFailed)  {
+        // all sub-shard replicas were created successfully so all cores must recover eventually
+        waitForRecoveriesToFinish(AbstractDistribZkTestBase.DEFAULT_COLLECTION, true);
+        // let's wait for the overseer to switch shard states
+        CountDownLatch latch = new CountDownLatch(1);
+        cloudClient.getZkStateReader().registerCollectionStateWatcher(AbstractDistribZkTestBase.DEFAULT_COLLECTION, new CollectionStateWatcher() {
+          @Override
+          public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
+            Slice parent = collectionState.getSlice(SHARD1);
+            Slice slice10 = collectionState.getSlice(SHARD1_0);
+            Slice slice11 = collectionState.getSlice(SHARD1_1);
+            if (slice10 != null && slice11 != null &&
+                parent.getState() == Slice.State.INACTIVE &&
+                slice10.getState() == Slice.State.ACTIVE &&
+                slice11.getState() == Slice.State.ACTIVE) {
+              areSubShardsActive.set(true);
+              latch.countDown();
+              return true; // removes the watch
+            } else if (slice10 != null && slice11 != null &&
+                parent.getState() == Slice.State.ACTIVE &&
+                slice10.getState() == Slice.State.RECOVERY_FAILED &&
+                slice11.getState() == Slice.State.RECOVERY_FAILED) {
+              areSubShardsActive.set(false);
+              latch.countDown();
+              return true;
+            }
+            return false;
+          }
+        });
+
+        latch.await(2, TimeUnit.MINUTES);
+
+        if (latch.getCount() != 0)  {
+          // sanity check
+          fail("We think that split was successful but sub-shard states were not updated even after 2 minutes.");
+        }
+      }
+
+      cloudClient.commit(); // for visibility of results on sub-shards
+
+      checkShardConsistency(true, true, addFails, deleteFails);
+      long ctrlDocs = controlClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+      // ensure we have added more than 0 docs
+      long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+      assertTrue("Found " + cloudClientDocs + " cloud docs", cloudClientDocs > 0);
+      assertEquals("Found " + ctrlDocs + " control docs and " + cloudClientDocs + " cloud docs", ctrlDocs, cloudClientDocs);
+
+      // check consistency of sub-shard replicas explicitly because the checkShardConsistency
+      // method doesn't handle new shards/replicas well.
+      if (areSubShardsActive.get()) {
+        ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+        DocCollection collection = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+        int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
+        assertEquals("We should have checked consistency for exactly 2 replicas of shard1_0", 2, numReplicasChecked);
+        numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_1));
+        assertEquals("We should have checked consistency for exactly 2 replicas of shard1_1", 2, numReplicasChecked);
+      }
+    } finally {
+      stop.set(true);
+      monkeyThread.join();
+    }
+  }
+
+  private boolean doesReplicaCoreExist(Replica replica) throws IOException {
+    try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getStr(BASE_URL_PROP))
+        .withHttpClient(cloudClient.getLbClient().getHttpClient()).build())  {
+      String coreName = replica.getCoreName();
+      try {
+        CoreAdminResponse status = CoreAdminRequest.getStatus(coreName, client);
+        if (status.getCoreStatus(coreName) == null || status.getCoreStatus(coreName).size() == 0) {
+          return false;
+        }
+      } catch (Exception e) {
+        log.warn("Error getting core status of replica " + replica + ". Perhaps it does not exist!", e);
+        return false;
+      }
+    }
+    return true;
+  }
+
+  @Test
+  public void testSplitShardWithRule() throws Exception {
+    waitForThingsToLevelOut(15);
+
+    if (usually()) {
+      log.info("Using legacyCloud=false for cluster");
+      CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
+          .process(cloudClient);
+    }
+
+    log.info("Starting testSplitShardWithRule");
+    String collectionName = "shardSplitWithRule";
+    CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2)
+        .setRule("shard:*,replica:<2,node:*");
+    CollectionAdminResponse response = createRequest.process(cloudClient);
+    assertEquals(0, response.getStatus());
+
+    CollectionAdminRequest.SplitShard splitShardRequest = CollectionAdminRequest.splitShard(collectionName)
+        .setShardName("shard1");
+    response = splitShardRequest.process(cloudClient);
+    assertEquals(String.valueOf(response.getErrorMessages()), 0, response.getStatus());
+  }
+
+  private void incompleteOrOverlappingCustomRangeTest() throws Exception  {
+    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
+    Slice shard1 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(SHARD1);
+    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
+
+    List<DocRouter.Range> subRanges = new ArrayList<>();
+    List<DocRouter.Range> ranges = router.partitionRange(4, shard1Range);
+
+    // test with only one range
+    subRanges.add(ranges.get(0));
+    try {
+      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
+      fail("Shard splitting with just one custom hash range should not succeed");
+    } catch (HttpSolrClient.RemoteSolrException e) {
+      log.info("Expected exception:", e);
+    }
+    subRanges.clear();
+
+    // test with ranges with a hole in between them
+    subRanges.add(ranges.get(3)); // order shouldn't matter
+    subRanges.add(ranges.get(0));
+    try {
+      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
+      fail("Shard splitting with missing hashes in between given ranges should not succeed");
+    } catch (HttpSolrClient.RemoteSolrException e) {
+      log.info("Expected exception:", e);
+    }
+    subRanges.clear();
+
+    // test with overlapping ranges
+    subRanges.add(ranges.get(0));
+    subRanges.add(ranges.get(1));
+    subRanges.add(ranges.get(2));
+    subRanges.add(new DocRouter.Range(ranges.get(3).min - 15, ranges.get(3).max));
+    try {
+      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
+      fail("Shard splitting with overlapping ranges should not succeed");
+    } catch (HttpSolrClient.RemoteSolrException e) {
+      log.info("Expected exception:", e);
+    }
+    subRanges.clear();
+  }
+
+  private void splitByUniqueKeyTest() throws Exception {
+    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
+    Slice shard1 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(SHARD1);
+    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
+    List<DocRouter.Range> subRanges = new ArrayList<>();
+    if (usually())  {
+      List<DocRouter.Range> ranges = router.partitionRange(4, shard1Range);
+      // 75% of range goes to shard1_0 and the rest to shard1_1
+      subRanges.add(new DocRouter.Range(ranges.get(0).min, ranges.get(2).max));
+      subRanges.add(ranges.get(3));
+    } else  {
+      subRanges = router.partitionRange(2, shard1Range);
+    }
+    final List<DocRouter.Range> ranges = subRanges;
+    final int[] docCounts = new int[ranges.size()];
+    int numReplicas = shard1.getReplicas().size();
+
+    del("*:*");
+    for (int id = 0; id <= 100; id++) {
+      String shardKey = "" + (char)('a' + (id % 26)); // See comment in ShardRoutingTest for hash distribution
+      indexAndUpdateCount(router, ranges, docCounts, shardKey + "!" + String.valueOf(id), id);
+    }
+    commit();
+
+    Thread indexThread = new Thread() {
+      @Override
+      public void run() {
+        Random random = random();
+        int max = atLeast(random, 401);
+        int sleep = atLeast(random, 25);
+        log.info("SHARDSPLITTEST: Going to add " + max + " docs at 1 doc per " + sleep + " ms");
+        Set<String> deleted = new HashSet<>();
+        for (int id = 101; id < max; id++) {
+          try {
+            indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id), id);
+            Thread.sleep(sleep);
+            if (usually(random))  {
+              String delId = String.valueOf(random.nextInt(id - 101 + 1) + 101);
+              if (deleted.contains(delId))  continue;
+              try {
+                deleteAndUpdateCount(router, ranges, docCounts, delId);
+                deleted.add(delId);
+              } catch (Exception e) {
+                log.error("Exception while deleting docs", e);
+              }
+            }
+          } catch (Exception e) {
+            log.error("Exception while adding doc id = " + id, e);
+            // do not select this id for deletion ever
+            deleted.add(String.valueOf(id));
+          }
+        }
+      }
+    };
+    indexThread.start();
+
+    try {
+      for (int i = 0; i < 3; i++) {
+        try {
+          splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
+          log.info("Layout after split: \n");
+          printLayout();
+          break;
+        } catch (HttpSolrClient.RemoteSolrException e) {
+          if (e.code() != 500)  {
+            throw e;
+          }
+          log.error("SPLITSHARD failed. " + (i < 2 ? " Retrying split" : ""), e);
+          if (i == 2) {
+            fail("SPLITSHARD was not successful even after three tries");
+          }
+        }
+      }
+    } finally {
+      try {
+        indexThread.join();
+      } catch (InterruptedException e) {
+        log.error("Indexing thread interrupted", e);
+      }
+    }
+
+    waitForRecoveriesToFinish(true);
+    checkDocCountsAndShardStates(docCounts, numReplicas);
+  }
+
+
+  public void splitByRouteFieldTest() throws Exception  {
+    log.info("Starting testSplitWithRouteField");
+    String collectionName = "routeFieldColl";
+    int numShards = 4;
+    int replicationFactor = 2;
+    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
+        .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
+
+    HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
+    String shard_fld = "shard_s";
+    try (CloudSolrClient client = createCloudClient(null)) {
+      Map<String, Object> props = Utils.makeMap(
+          REPLICATION_FACTOR, replicationFactor,
+          MAX_SHARDS_PER_NODE, maxShardsPerNode,
+          OverseerCollectionMessageHandler.NUM_SLICES, numShards,
+          "router.field", shard_fld);
+
+      createCollection(collectionInfos, collectionName, props, client);
+    }
+
+    List<Integer> list = collectionInfos.get(collectionName);
+    checkForCollection(collectionName, list, null);
+
+    waitForRecoveriesToFinish(false);
+
+    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
+
+    try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
+
+      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+      final DocRouter router = clusterState.getCollection(collectionName).getRouter();
+      Slice shard1 = clusterState.getCollection(collectionName).getSlice(SHARD1);
+      DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
+      final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
+      final int[] docCounts = new int[ranges.size()];
+
+      for (int i = 100; i <= 200; i++) {
+        String shardKey = "" + (char) ('a' + (i % 26)); // See comment in ShardRoutingTest for hash distribution
+
+        collectionClient.add(getDoc(id, i, "n_ti", i, shard_fld, shardKey));
+        int idx = getHashRangeIdx(router, ranges, shardKey);
+        if (idx != -1) {
+          docCounts[idx]++;
+        }
+      }
+
+      for (int i = 0; i < docCounts.length; i++) {
+        int docCount = docCounts[i];
+        log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
+      }
+
+      collectionClient.commit();
+
+      for (int i = 0; i < 3; i++) {
+        try {
+          splitShard(collectionName, SHARD1, null, null);
+          break;
+        } catch (HttpSolrClient.RemoteSolrException e) {
+          if (e.code() != 500) {
+            throw e;
+          }
+          log.error("SPLITSHARD failed. " + (i < 2 ? " Retrying split" : ""), e);
+          if (i == 2) {
+            fail("SPLITSHARD was not successful even after three tries");
+          }
+        }
+      }
+
+      waitForRecoveriesToFinish(collectionName, false);
+
+      assertEquals(docCounts[0], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_0")).getResults().getNumFound());
+      assertEquals(docCounts[1], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_1")).getResults().getNumFound());
+    }
+  }
+
+  private void splitByRouteKeyTest() throws Exception {
+    log.info("Starting splitByRouteKeyTest");
+    String collectionName = "splitByRouteKeyTest";
+    int numShards = 4;
+    int replicationFactor = 2;
+    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
+        .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
+
+    HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      Map<String, Object> props = Utils.makeMap(
+          REPLICATION_FACTOR, replicationFactor,
+          MAX_SHARDS_PER_NODE, maxShardsPerNode,
+          OverseerCollectionMessageHandler.NUM_SLICES, numShards);
+
+      createCollection(collectionInfos, collectionName, props, client);
+    }
+
+    List<Integer> list = collectionInfos.get(collectionName);
+    checkForCollection(collectionName, list, null);
+
+    waitForRecoveriesToFinish(false);
+
+    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
+
+    try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
+
+      String splitKey = "b!";
+
+      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+      final DocRouter router = clusterState.getCollection(collectionName).getRouter();
+      Slice shard1 = clusterState.getCollection(collectionName).getSlice(SHARD1);
+      DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
+      final List<DocRouter.Range> ranges = ((CompositeIdRouter) router).partitionRangeByKey(splitKey, shard1Range);
+      final int[] docCounts = new int[ranges.size()];
+
+      int uniqIdentifier = (1 << 12);
+      int splitKeyDocCount = 0;
+      for (int i = 100; i <= 200; i++) {
+        String shardKey = "" + (char) ('a' + (i % 26)); // See comment in ShardRoutingTest for hash distribution
+
+        String idStr = shardKey + "!" + i;
+        collectionClient.add(getDoc(id, idStr, "n_ti", (shardKey + "!").equals(splitKey) ? uniqIdentifier : i));
+        int idx = getHashRangeIdx(router, ranges, idStr);
+        if (idx != -1) {
+          docCounts[idx]++;
+        }
+        if (splitKey.equals(shardKey + "!"))
+          splitKeyDocCount++;
+      }
+
+      for (int i = 0; i < docCounts.length; i++) {
+        int docCount = docCounts[i];
+        log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
+      }
+      log.info("Route key doc count = {}", splitKeyDocCount);
+
+      collectionClient.commit();
+
+      for (int i = 0; i < 3; i++) {
+        try {
+          splitShard(collectionName, null, null, splitKey);
+          break;
+        } catch (HttpSolrClient.RemoteSolrException e) {
+          if (e.code() != 500) {
+            throw e;
+          }
+          log.error("SPLITSHARD failed. " + (i < 2 ? " Retrying split" : ""), e);
+          if (i == 2) {
+            fail("SPLITSHARD was not successful even after three tries");
+          }
+        }
+      }
+
+      waitForRecoveriesToFinish(collectionName, false);
+      SolrQuery solrQuery = new SolrQuery("*:*");
+      assertEquals("DocCount on shard1_0 does not match", docCounts[0], collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
+      assertEquals("DocCount on shard1_1 does not match", docCounts[1], collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
+      assertEquals("DocCount on shard1_2 does not match", docCounts[2], collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
+
+      solrQuery = new SolrQuery("n_ti:" + uniqIdentifier);
+      assertEquals("shard1_0 must have 0 docs for route key: " + splitKey, 0, collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
+      assertEquals("Wrong number of docs on shard1_1 for route key: " + splitKey, splitKeyDocCount, collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
+      assertEquals("shard1_2 must have 0 docs for route key: " + splitKey, 0, collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
+    }
+  }
+
+  protected void checkDocCountsAndShardStates(int[] docCounts, int numReplicas) throws Exception {
+    ClusterState clusterState = null;
+    Slice slice1_0 = null, slice1_1 = null;
+    int i = 0;
+    for (i = 0; i < 10; i++) {
+      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      clusterState = zkStateReader.getClusterState();
+      slice1_0 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice("shard1_0");
+      slice1_1 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice("shard1_1");
+      if (slice1_0.getState() == Slice.State.ACTIVE && slice1_1.getState() == Slice.State.ACTIVE) {
+        break;
+      }
+      Thread.sleep(500);
+    }
+
+    log.info("ShardSplitTest waited for {} ms for shard state to be set to active", i * 500);
+
+    assertNotNull("Cluster state does not contain shard1_0", slice1_0);
+    assertNotNull("Cluster state does not contain shard1_1", slice1_1);
+    assertSame("shard1_0 is not active", Slice.State.ACTIVE, slice1_0.getState());
+    assertSame("shard1_1 is not active", Slice.State.ACTIVE, slice1_1.getState());
+    assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size());
+    assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size());
+
+    commit();
+
+    // can't use checkShardConsistency because it insists on jettys and clients for each shard
+    checkSubShardConsistency(SHARD1_0);
+    checkSubShardConsistency(SHARD1_1);
+
+    SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
+    query.set("distrib", false);
+
+    ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
+    QueryResponse response;
+    try (HttpSolrClient shard1_0Client = getHttpSolrClient(shard1_0.getCoreUrl())) {
+      response = shard1_0Client.query(query);
+    }
+    long shard10Count = response.getResults().getNumFound();
+
+    ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(
+        AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
+    QueryResponse response2;
+    try (HttpSolrClient shard1_1Client = getHttpSolrClient(shard1_1.getCoreUrl())) {
+      response2 = shard1_1Client.query(query);
+    }
+    long shard11Count = response2.getResults().getNumFound();
+
+    logDebugHelp(docCounts, response, shard10Count, response2, shard11Count);
+
+    assertEquals("Wrong doc count on shard1_0. See SOLR-5309", docCounts[0], shard10Count);
+    assertEquals("Wrong doc count on shard1_1. See SOLR-5309", docCounts[1], shard11Count);
+  }
+
+  protected void checkSubShardConsistency(String shard) throws SolrServerException, IOException {
+    SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
+    query.set("distrib", false);
+
+    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    Slice slice = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(shard);
+    long[] numFound = new long[slice.getReplicasMap().size()];
+    int c = 0;
+    for (Replica replica : slice.getReplicas()) {
+      String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
+      QueryResponse response;
+      try (HttpSolrClient client = getHttpSolrClient(coreUrl)) {
+        response = client.query(query);
+      }
+      numFound[c++] = response.getResults().getNumFound();
+      log.info("Shard: {} Replica: {} has {} docs", shard, coreUrl, response.getResults().getNumFound());
+      assertTrue("Shard: " + shard + " Replica: " + coreUrl + " has 0 docs", response.getResults().getNumFound() > 0);
+    }
+    for (int i = 0; i < slice.getReplicasMap().size(); i++) {
+      assertEquals(shard + " is not consistent", numFound[0], numFound[i]);
+    }
+  }
+
+  protected void splitShard(String collection, String shardId, List<DocRouter.Range> subRanges, String splitKey) throws SolrServerException, IOException {
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionParams.CollectionAction.SPLITSHARD.toString());
+    params.set("collection", collection);
+    if (shardId != null)  {
+      params.set("shard", shardId);
+    }
+    if (subRanges != null)  {
+      StringBuilder ranges = new StringBuilder();
+      for (int i = 0; i < subRanges.size(); i++) {
+        DocRouter.Range subRange = subRanges.get(i);
+        ranges.append(subRange.toString());
+        if (i < subRanges.size() - 1)
+          ranges.append(",");
+      }
+      params.set("ranges", ranges.toString());
+    }
+    if (splitKey != null) {
+      params.set("split.key", splitKey);
+    }
+    SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.getSolrClient()).getBaseURL();
+    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
+
+    try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 30000, 60000 * 5)) {
+      baseServer.request(request);
+    }
+  }
+
+  protected void indexAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id, int n) throws Exception {
+    index("id", id, "n_ti", n);
+
+    int idx = getHashRangeIdx(router, ranges, id);
+    if (idx != -1)  {
+      docCounts[idx]++;
+    }
+  }
+
+  protected void deleteAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id) throws Exception {
+    controlClient.deleteById(id);
+    cloudClient.deleteById(id);
+
+    int idx = getHashRangeIdx(router, ranges, id);
+    if (idx != -1)  {
+      docCounts[idx]--;
+    }
+  }
+
+  public static int getHashRangeIdx(DocRouter router, List<DocRouter.Range> ranges, String id) {
+    int hash = 0;
+    if (router instanceof HashBasedRouter) {
+      HashBasedRouter hashBasedRouter = (HashBasedRouter) router;
+      hash = hashBasedRouter.sliceHash(id, null, null, null);
+    }
+    for (int i = 0; i < ranges.size(); i++) {
+      DocRouter.Range range = ranges.get(i);
+      if (range.includes(hash))
+        return i;
+    }
+    return -1;
+  }
+
+  protected void logDebugHelp(int[] docCounts, QueryResponse response, long shard10Count, QueryResponse response2, long shard11Count) {
+    for (int i = 0; i < docCounts.length; i++) {
+      int docCount = docCounts[i];
+      log.info("Expected docCount for shard1_{} = {}", i, docCount);
+    }
+
+    log.info("Actual docCount for shard1_0 = {}", shard10Count);
+    log.info("Actual docCount for shard1_1 = {}", shard11Count);
+    Map<String, String> idVsVersion = new HashMap<>();
+    Map<String, SolrDocument> shard10Docs = new HashMap<>();
+    Map<String, SolrDocument> shard11Docs = new HashMap<>();
+    for (int i = 0; i < response.getResults().size(); i++) {
+      SolrDocument document = response.getResults().get(i);
+      idVsVersion.put(document.getFieldValue("id").toString(), document.getFieldValue("_version_").toString());
+      SolrDocument old = shard10Docs.put(document.getFieldValue("id").toString(), document);
+      if (old != null) {
+        log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_0. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
+      }
+    }
+    for (int i = 0; i < response2.getResults().size(); i++) {
+      SolrDocument document = response2.getResults().get(i);
+      String value = document.getFieldValue("id").toString();
+      String version = idVsVersion.get(value);
+      if (version != null) {
+        log.error("DUPLICATE: ID: " + value + " , shard1_0Version: " + version + " shard1_1Version:" + document.getFieldValue("_version_"));
+      }
+      SolrDocument old = shard11Docs.put(document.getFieldValue("id").toString(), document);
+      if (old != null) {
+        log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_1. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
+      }
+    }
+  }
+
+  @Override
+  protected SolrClient createNewSolrClient(String collection, String baseUrl) {
+    HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(collection, baseUrl, DEFAULT_CONNECTION_TIMEOUT, 5 * 60 * 1000);
+    return client;
+  }
+
+  @Override
+  protected SolrClient createNewSolrClient(int port) {
+    HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(port, DEFAULT_CONNECTION_TIMEOUT, 5 * 60 * 1000);
+    return client;
+  }
+
+  @Override
+  protected CloudSolrClient createCloudClient(String defaultCollection) {
+    CloudSolrClient client = super.createCloudClient(defaultCollection);
+    return client;
+  }
+}

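A note on the splitShard helper above: the ranges parameter it assembles is just the comma-joined hex rendering of DocRouter.Range values. A standalone sketch of producing such a value, assuming SolrJ's CompositeIdRouter and DocRouter.partitionRange behave as used in the test:

import java.util.List;
import java.util.stream.Collectors;

import org.apache.solr.common.cloud.CompositeIdRouter;
import org.apache.solr.common.cloud.DocRouter;

public class RangesParamSketch {
  public static void main(String[] args) {
    DocRouter router = new CompositeIdRouter();
    // Partition the full 32-bit hash ring into two contiguous sub-ranges.
    List<DocRouter.Range> parts = router.partitionRange(2, router.fullRange());
    // Range.toString() renders the hex form SPLITSHARD expects, e.g. "80000000-ffffffff".
    String rangesParam = parts.stream()
        .map(DocRouter.Range::toString)
        .collect(Collectors.joining(","));
    System.out.println("ranges=" + rangesParam);
  }
}

Ranges that leave gaps or overlap are exactly what incompleteOrOverlappingCustomRangeTest expects the collections API to reject.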
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/SimpleCollectionCreateDeleteTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/SimpleCollectionCreateDeleteTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/SimpleCollectionCreateDeleteTest.java
new file mode 100644
index 0000000..0b75bd5
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/SimpleCollectionCreateDeleteTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
+import org.apache.solr.cloud.OverseerCollectionConfigSetProcessor;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.NamedList;
+import org.junit.Test;
+
+public class SimpleCollectionCreateDeleteTest extends AbstractFullDistribZkTestBase {
+
+  public SimpleCollectionCreateDeleteTest() {
+    sliceCount = 1;
+  }
+
+  @Test
+  @ShardsFixed(num = 1)
+  public void test() throws Exception {
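+    // Create the collection on the Overseer leader's node, delete it, and then
+    // recreate it on a non-overseer node to exercise both code paths.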
+    String overseerNode = OverseerCollectionConfigSetProcessor.getLeaderNode(cloudClient.getZkStateReader().getZkClient());
+    String notOverseerNode = null;
+    for (CloudJettyRunner cloudJetty : cloudJettys) {
+      if (!overseerNode.equals(cloudJetty.nodeName)) {
+        notOverseerNode = cloudJetty.nodeName;
+        break;
+      }
+    }
+    String collectionName = "SimpleCollectionCreateDeleteTest";
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,1,1)
+            .setCreateNodeSet(overseerNode)
+            .setStateFormat(2);
+
+    NamedList<Object> request = create.process(cloudClient).getResponse();
+
+    if (request.get("success") != null) {
+      assertTrue(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, false));
+
+      CollectionAdminRequest delete = CollectionAdminRequest.deleteCollection(collectionName);
+      cloudClient.request(delete);
+
+      assertFalse(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, false));
+
+      // create collection again on a node other than the overseer leader
+      create = CollectionAdminRequest.createCollection(collectionName,1,1)
+              .setCreateNodeSet(notOverseerNode)
+              .setStateFormat(2);
+      request = create.process(cloudClient).getResponse();
+      assertTrue("Collection creation should not have failed", request.get("success") != null);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
new file mode 100644
index 0000000..5ffed50
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
@@ -0,0 +1,795 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.google.common.collect.Lists;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.V2Request;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.ShardParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Test;
+
+public class TestCollectionAPI extends ReplicaPropertiesBase {
+
+  public static final String COLLECTION_NAME = "testcollection";
+  public static final String COLLECTION_NAME1 = "testcollection1";
+
+  public TestCollectionAPI() {
+    schemaString = "schema15.xml";      // we need a string id
+    sliceCount = 2;
+  }
+
+  @Test
+  @ShardsFixed(num = 2)
+  public void test() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      CollectionAdminRequest.Create req;
+      if (useTlogReplicas()) {
+        req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1",2, 0, 1, 1);
+      } else {
+        req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1",2, 1, 0, 1);
+      }
+      req.setMaxShardsPerNode(2);
+      setV2(req);
+      client.request(req);
+      assertV2CallsCount();
+      createCollection(null, COLLECTION_NAME1, 1, 1, 1, client, null, "conf1");
+    }
+
+    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
+    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME1, 1);
+    waitForRecoveriesToFinish(COLLECTION_NAME, false);
+    waitForRecoveriesToFinish(COLLECTION_NAME1, false);
+
+    listCollection();
+    clusterStatusNoCollection();
+    clusterStatusWithCollection();
+    clusterStatusWithCollectionAndShard();
+    clusterStatusWithRouteKey();
+    clusterStatusAliasTest();
+    clusterStatusRolesTest();
+    clusterStatusBadCollectionTest();
+    replicaPropTest();
+    clusterStatusZNodeVersion();
+    testClusterStateMigration();
+    testCollectionCreationCollectionNameValidation();
+    testCollectionCreationShardNameValidation();
+    testAliasCreationNameValidation();
+    testShardCreationNameValidation();
+  }
+
+  private void clusterStatusWithCollectionAndShard() throws IOException, SolrServerException {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", COLLECTION_NAME);
+      params.set("shard", SHARD1);
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
+      assertNotNull("Collections should not be null in cluster state", collections);
+      assertNotNull(collections.get(COLLECTION_NAME));
+      assertEquals(1, collections.size());
+      Map<String, Object> collection = (Map<String, Object>) collections.get(COLLECTION_NAME);
+      Map<String, Object> shardStatus = (Map<String, Object>) collection.get("shards");
+      assertEquals(1, shardStatus.size());
+      Map<String, Object> selectedShardStatus = (Map<String, Object>) shardStatus.get(SHARD1);
+      assertNotNull(selectedShardStatus);
+
+    }
+  }
+
+
+  private void listCollection() throws IOException, SolrServerException {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.LIST.toString());
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      List<String> collections = (List<String>) rsp.get("collections");
+      assertTrue("control_collection was not found in list", collections.contains("control_collection"));
+      assertTrue(DEFAULT_COLLECTION + " was not found in list", collections.contains(DEFAULT_COLLECTION));
+      assertTrue(COLLECTION_NAME + " was not found in list", collections.contains(COLLECTION_NAME));
+      assertTrue(COLLECTION_NAME1 + " was not found in list", collections.contains(COLLECTION_NAME1));
+    }
+
+  }
+
+  private void clusterStatusNoCollection() throws Exception {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
+      assertNotNull("Collections should not be null in cluster state", collections);
+      assertNotNull(collections.get(COLLECTION_NAME1));
+      assertEquals(4, collections.size());
+
+      List<String> liveNodes = (List<String>) cluster.get("live_nodes");
+      assertNotNull("Live nodes should not be null", liveNodes);
+      assertFalse(liveNodes.isEmpty());
+    }
+
+  }
+
+  private void clusterStatusWithCollection() throws IOException, SolrServerException {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", COLLECTION_NAME);
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
+      assertNotNull("Collections should not be null in cluster state", collections);
+      assertEquals(1, collections.size());
+      Map<String, Object> collection = (Map<String, Object>) collections.get(COLLECTION_NAME);
+      assertNotNull(collection);
+      assertEquals("conf1", collection.get("configName"));
+//      assertEquals("1", collection.get("nrtReplicas"));
+    }
+  }
+
+  private void clusterStatusZNodeVersion() throws Exception {
+    String cname = "clusterStatusZNodeVersion";
+    try (CloudSolrClient client = createCloudClient(null)) {
+      setV2(CollectionAdminRequest.createCollection(cname, "conf1", 1, 1).setMaxShardsPerNode(1)).process(client);
+      assertV2CallsCount();
+      waitForRecoveriesToFinish(cname, true);
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", cname);
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
+      assertNotNull("Collections should not be null in cluster state", collections);
+      assertEquals(1, collections.size());
+      Map<String, Object> collection = (Map<String, Object>) collections.get(cname);
+      assertNotNull(collection);
+      assertEquals("conf1", collection.get("configName"));
+      Integer znodeVersion = (Integer) collection.get("znodeVersion");
+      assertNotNull(znodeVersion);
+
+      CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(cname, "shard1");
+      setV2(addReplica);
+      addReplica.process(client);
+      assertV2CallsCount();
+      waitForRecoveriesToFinish(cname, true);
+
+      rsp = client.request(request);
+      cluster = (NamedList<Object>) rsp.get("cluster");
+      collections = (NamedList<Object>) cluster.get("collections");
+      collection = (Map<String, Object>) collections.get(cname);
+      Integer newVersion = (Integer) collection.get("znodeVersion");
+      assertNotNull(newVersion);
+      assertTrue(newVersion > znodeVersion);
+    }
+  }
+
+  private static long totalExpectedV2Calls;
+
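+  // Randomly routes the given request through the V2 API (JSON or binary) and
+  // bumps the expected-call counter so assertV2CallsCount() can verify it.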
+  public static SolrRequest setV2(SolrRequest req) {
+    if (V2Request.v2Calls.get() == null) V2Request.v2Calls.set(new AtomicLong());
+    totalExpectedV2Calls = V2Request.v2Calls.get().get();
+    if (random().nextBoolean()) {
+      req.setUseV2(true);
+      req.setUseBinaryV2(random().nextBoolean());
+      totalExpectedV2Calls++;
+    }
+    return req;
+  }
+
+  public static void assertV2CallsCount() {
+    assertEquals(totalExpectedV2Calls, V2Request.v2Calls.get().get());
+  }
+
+  private void clusterStatusWithRouteKey() throws IOException, SolrServerException {
+    try (CloudSolrClient client = createCloudClient(DEFAULT_COLLECTION)) {
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", "a!123"); // goes to shard2. see ShardRoutingTest for details
+      client.add(doc);
+      client.commit();
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", DEFAULT_COLLECTION);
+      params.set(ShardParams._ROUTE_, "a!");
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
+      assertNotNull("Collections should not be null in cluster state", collections);
+      assertNotNull(collections.get(DEFAULT_COLLECTION));
+      assertEquals(1, collections.size());
+      Map<String, Object> collection = (Map<String, Object>) collections.get(DEFAULT_COLLECTION);
+      assertEquals("conf1", collection.get("configName"));
+      Map<String, Object> shardStatus = (Map<String, Object>) collection.get("shards");
+      assertEquals(1, shardStatus.size());
+      Map<String, Object> selectedShardStatus = (Map<String, Object>) shardStatus.get(SHARD2);
+      assertNotNull(selectedShardStatus);
+    }
+  }
+
+  private void clusterStatusAliasTest() throws Exception  {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CREATEALIAS.toString());
+      params.set("name", "myalias");
+      params.set("collections", DEFAULT_COLLECTION + "," + COLLECTION_NAME);
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+      client.request(request);
+      params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", DEFAULT_COLLECTION);
+      request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+
+
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      Map<String, String> aliases = (Map<String, String>) cluster.get("aliases");
+      assertNotNull("Aliases should not be null", aliases);
+      assertEquals("Alias: myalias not found in cluster status",
+          DEFAULT_COLLECTION + "," + COLLECTION_NAME, aliases.get("myalias"));
+
+      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
+      assertNotNull("Collections should not be null in cluster state", collections);
+      assertNotNull(collections.get(DEFAULT_COLLECTION));
+      Map<String, Object> collection = (Map<String, Object>) collections.get(DEFAULT_COLLECTION);
+      assertEquals("conf1", collection.get("configName"));
+      List<String> collAlias = (List<String>) collection.get("aliases");
+      assertEquals("Aliases not found", Lists.newArrayList("myalias"), collAlias);
+    }
+  }
+
+  private void clusterStatusRolesTest() throws Exception  {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      client.connect();
+      Replica replica = client.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.ADDROLE.toString());
+      params.set("node", replica.getNodeName());
+      params.set("role", "overseer");
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+      client.request(request);
+
+      params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", DEFAULT_COLLECTION);
+      request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      Map<String, Object> roles = (Map<String, Object>) cluster.get("roles");
+      assertNotNull("Role information should not be null", roles);
+      List<String> overseer = (List<String>) roles.get("overseer");
+      assertNotNull(overseer);
+      assertEquals(1, overseer.size());
+      assertTrue(overseer.contains(replica.getNodeName()));
+    }
+  }
+
+  private void clusterStatusBadCollectionTest() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", "bad_collection_name");
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      try {
+        client.request(request);
+        fail("Collection does not exist. An exception should be thrown");
+      } catch (SolrException e) {
+        //expected
+        assertTrue(e.getMessage().contains("Collection: bad_collection_name not found"));
+      }
+    }
+  }
+
+  private void replicaPropTest() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      client.connect();
+      Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
+      List<String> sliceList = new ArrayList<>(slices.keySet());
+      String c1_s1 = sliceList.get(0);
+      List<String> replicasList = new ArrayList<>(slices.get(c1_s1).getReplicasMap().keySet());
+      String c1_s1_r1 = replicasList.get(0);
+      String c1_s1_r2 = replicasList.get(1);
+
+      String c1_s2 = sliceList.get(1);
+      replicasList = new ArrayList<>(slices.get(c1_s2).getReplicasMap().keySet());
+      String c1_s2_r1 = replicasList.get(0);
+      String c1_s2_r2 = replicasList.get(1);
+
+
+      slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME1).getSlicesMap();
+      sliceList = new ArrayList<>(slices.keySet());
+      String c2_s1 = sliceList.get(0);
+      replicasList = new ArrayList<>(slices.get(c2_s1).getReplicasMap().keySet());
+      String c2_s1_r1 = replicasList.get(0);
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString());
+
+      // Ensure we get error returns when omitting required parameters
+
+      missingParamsError(client, params);
+      params.set("collection", COLLECTION_NAME);
+      missingParamsError(client, params);
+      params.set("shard", c1_s1);
+      missingParamsError(client, params);
+      params.set("replica", c1_s1_r1);
+      missingParamsError(client, params);
+      params.set("property", "preferredLeader");
+      missingParamsError(client, params);
+      params.set("property.value", "true");
+
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+      client.request(request);
+
+      // The above should have set exactly one preferredleader...
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "preferredleader", "true");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r2,
+          "property", "preferredLeader",
+          "property.value", "true");
+      // The preferred leader property for shard1 should have switched to the other replica.
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s2,
+          "replica", c1_s2_r1,
+          "property", "preferredLeader",
+          "property.value", "true");
+
+      // Now we should have a preferred leader in both shards...
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME1,
+          "shard", c2_s1,
+          "replica", c2_s1_r1,
+          "property", "preferredLeader",
+          "property.value", "true");
+
+      // Now we should have three preferred leaders.
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME1, c2_s1_r1, "preferredleader", "true");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME1,
+          "shard", c2_s1,
+          "replica", c2_s1_r1,
+          "property", "preferredLeader");
+
+      // Now we should have two preferred leaders.
+      // But first we have to wait for the overseer to finish the action
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      // Try adding an arbitrary property to one that has the leader property
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "testprop",
+          "property.value", "true");
+
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "true");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r2,
+          "property", "prop",
+          "property.value", "silly");
+
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "testprop",
+          "property.value", "nonsense",
+          OverseerCollectionMessageHandler.SHARD_UNIQUE, "true");
+
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "nonsense");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "property.testprop",
+          "property.value", "true",
+          OverseerCollectionMessageHandler.SHARD_UNIQUE, "false");
+
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "property.testprop");
+
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "testprop");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      try {
+        doPropertyAction(client,
+            "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+            "collection", COLLECTION_NAME,
+            "shard", c1_s1,
+            "replica", c1_s1_r1,
+            "property", "preferredLeader",
+            "property.value", "true",
+            OverseerCollectionMessageHandler.SHARD_UNIQUE, "false");
+        fail("Should have thrown an exception, setting shardUnique=false is not allowed for 'preferredLeader'.");
+      } catch (SolrException se) {
+        assertTrue("Should have received a specific error message",
+            se.getMessage().contains("with the shardUnique parameter set to something other than 'true'"));
+      }
+
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "testprop");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      Map<String, String> origProps = getProps(client, COLLECTION_NAME, c1_s1_r1,
+          "state", "core", "node_name", "base_url");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "state",
+          "property.value", "state_bad");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "core",
+          "property.value", "core_bad");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "node_name",
+          "property.value", "node_name_bad");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "base_url",
+          "property.value", "base_url_bad");
+
+      // The above should have created new properties rather than changing the replica's built-in values.
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "state", "state_bad");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "core", "core_bad");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "node_name", "node_name_bad");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "base_url", "base_url_bad");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "state");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "core");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "node_name");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "base_url");
+
+      // They better not have been changed!
+      for (Map.Entry<String, String> ent : origProps.entrySet()) {
+        verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, ent.getKey(), ent.getValue());
+      }
+
+      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "state");
+      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "core");
+      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "node_name");
+      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "base_url");
+
+    }
+  }
+
+  private void testClusterStateMigration() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      client.connect();
+
+      CollectionAdminRequest.createCollection("testClusterStateMigration","conf1",1,1).setStateFormat(1).process(client);
+
+      waitForRecoveriesToFinish("testClusterStateMigration", true);
+
+      assertEquals(1, client.getZkStateReader().getClusterState().getCollection("testClusterStateMigration").getStateFormat());
+
+      for (int i = 0; i < 10; i++) {
+        SolrInputDocument doc = new SolrInputDocument();
+        doc.addField("id", "id_" + i);
+        client.add("testClusterStateMigration", doc);
+      }
+      client.commit("testClusterStateMigration");
+
+      CollectionAdminRequest.migrateCollectionFormat("testClusterStateMigration").process(client);
+
+      client.getZkStateReader().forceUpdateCollection("testClusterStateMigration");
+
+      assertEquals(2, client.getZkStateReader().getClusterState().getCollection("testClusterStateMigration").getStateFormat());
+
+      QueryResponse response = client.query("testClusterStateMigration", new SolrQuery("*:*"));
+      assertEquals(10, response.getResults().getNumFound());
+    }
+  }
+  
+  private void testCollectionCreationCollectionNameValidation() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CREATE.toString());
+      params.set("name", "invalid@name#with$weird%characters");
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      try {
+        client.request(request);
+        fail();
+      } catch (RemoteSolrException e) {
+        final String errorMessage = e.getMessage();
+        assertTrue(errorMessage.contains("Invalid collection"));
+        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
+        assertTrue(errorMessage.contains("collection names must consist entirely of"));
+      }
+    }
+  }
+  
+  private void testCollectionCreationShardNameValidation() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CREATE.toString());
+      params.set("name", "valid_collection_name");
+      params.set("router.name", "implicit");
+      params.set("numShards", "1");
+      params.set("shards", "invalid@name#with$weird%characters");
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      try {
+        client.request(request);
+        fail();
+      } catch (RemoteSolrException e) {
+        final String errorMessage = e.getMessage();
+        assertTrue(errorMessage.contains("Invalid shard"));
+        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
+        assertTrue(errorMessage.contains("shard names must consist entirely of"));
+      }
+    }
+  }
+  
+  private void testAliasCreationNameValidation() throws Exception{
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CREATEALIAS.toString());
+      params.set("name", "invalid@name#with$weird%characters");
+      params.set("collections", COLLECTION_NAME);
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      try {
+        client.request(request);
+        fail();
+      } catch (RemoteSolrException e) {
+        final String errorMessage = e.getMessage();
+        assertTrue(errorMessage.contains("Invalid alias"));
+        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
+        assertTrue(errorMessage.contains("alias names must consist entirely of"));
+      }
+    }
+  }
+
+  private void testShardCreationNameValidation() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      client.connect();
+      // Create a collection w/ implicit router
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CREATE.toString());
+      params.set("name", "valid_collection_name");
+      params.set("shards", "a");
+      params.set("router.name", "implicit");
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+      client.request(request);
+
+      params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CREATESHARD.toString());
+      params.set("collection", "valid_collection_name");
+      params.set("shard", "invalid@name#with$weird%characters");
+
+      request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      try {
+        client.request(request);
+        fail();
+      } catch (RemoteSolrException e) {
+        final String errorMessage = e.getMessage();
+        assertTrue(errorMessage.contains("Invalid shard"));
+        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
+        assertTrue(errorMessage.contains("shard names must consist entirely of"));
+      }
+    }
+  }
+
+  // Expects the returned map to contain every requested key, possibly with blank values.
+  private Map<String, String> getProps(CloudSolrClient client, String collectionName, String replicaName, String... props)
+      throws KeeperException, InterruptedException {
+
+    client.getZkStateReader().forceUpdateCollection(collectionName);
+    ClusterState clusterState = client.getZkStateReader().getClusterState();
+    final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
+    if (docCollection == null || docCollection.getReplica(replicaName) == null) {
+      fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
+    }
+    Replica replica = docCollection.getReplica(replicaName);
+    Map<String, String> propMap = new HashMap<>();
+    for (String prop : props) {
+      propMap.put(prop, replica.getProperty(prop));
+    }
+    return propMap;
+  }
+  private void missingParamsError(CloudSolrClient client, ModifiableSolrParams origParams)
+      throws IOException, SolrServerException {
+
+    SolrRequest request;
+    try {
+      request = new QueryRequest(origParams);
+      request.setPath("/admin/collections");
+      client.request(request);
+      fail("Should have thrown a SolrException due to lack of a required parameter.");
+    } catch (SolrException se) {
+      assertTrue("Should have gotten a specific message back mentioning 'missing required parameter'. Got: " + se.getMessage(),
+          se.getMessage().toLowerCase(Locale.ROOT).contains("missing required parameter:"));
+    }
+  }
+}


[36/41] lucene-solr:jira/solr-11702: SOLR-11871: MoveReplicaSuggester should not suggest leader if other replicas are available

Posted by da...@apache.org.
SOLR-11871: MoveReplicaSuggester should not suggest leader if other replicas are available


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/876ecd87
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/876ecd87
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/876ecd87

Branch: refs/heads/jira/solr-11702
Commit: 876ecd87fb44b828785681238915eb0e1965ad58
Parents: f72a5db
Author: Noble Paul <no...@apache.org>
Authored: Tue Jan 23 00:12:51 2018 +1100
Committer: Noble Paul <no...@apache.org>
Committed: Tue Jan 23 00:12:51 2018 +1100

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 ++
 .../cloud/autoscaling/MoveReplicaSuggester.java | 10 +++++++-
 .../solrj/cloud/autoscaling/ReplicaInfo.java    |  7 +++++
 .../apache/solr/common/cloud/ZkNodeProps.java   | 11 ++++----
 .../solrj/cloud/autoscaling/TestPolicy.java     | 27 ++++++++++++++++++++
 5 files changed, 51 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
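
For context, the heart of this change is an ordering that pushes leader
replicas to the end of the candidate list, so the suggester proposes moving a
follower whenever one is available. Below is a minimal, self-contained sketch
of that idea, not the committed code: ReplicaStub is an illustrative stand-in
for the solrj ReplicaInfo, and Boolean.compare is used to keep the comparator
contract-safe even when both replicas are leaders.

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.Comparator;
  import java.util.List;

  public class LeaderLastDemo {
    // Illustrative stand-in for ReplicaInfo; only what the ordering needs.
    static final class ReplicaStub {
      final String name;
      final boolean isLeader;
      ReplicaStub(String name, boolean isLeader) { this.name = name; this.isLeader = isLeader; }
    }

    // Boolean.compare(false, true) < 0: non-leaders sort first, leaders sink
    // to the end, and ties keep their original order because the sort is stable.
    static final Comparator<ReplicaStub> LEADER_LAST =
        (r1, r2) -> Boolean.compare(r1.isLeader, r2.isLeader);

    public static void main(String[] args) {
      List<ReplicaStub> replicas = new ArrayList<>(Arrays.asList(
          new ReplicaStub("r1", true),
          new ReplicaStub("r2", false),
          new ReplicaStub("r3", false)));
      replicas.sort(LEADER_LAST);
      replicas.forEach(r -> System.out.println(r.name)); // prints r2, r3, r1
    }
  }

A hand-rolled comparator that returns a constant for any leader breaks the
Comparator contract when both elements are leaders; Boolean.compare avoids
that while producing the same leader-last order.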


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/876ecd87/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 7f63679..9d3cbcb 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -163,6 +163,8 @@ Other Changes
 
 * SOLR-11747: Pause triggers until actions finish executing and the cool down period expires. (shalin)
 
+* SOLR-11871: MoveReplicaSuggester should not suggest leader if other replicas are available (noble)
+
 ==================  7.2.1 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/876ecd87/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/MoveReplicaSuggester.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/MoveReplicaSuggester.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/MoveReplicaSuggester.java
index 25e75ad..d5918e5 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/MoveReplicaSuggester.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/MoveReplicaSuggester.java
@@ -18,6 +18,7 @@
 package org.apache.solr.client.solrj.cloud.autoscaling;
 
 import java.io.IOException;
+import java.util.Comparator;
 import java.util.List;
 
 import org.apache.solr.client.solrj.SolrRequest;
@@ -40,7 +41,9 @@ public class MoveReplicaSuggester extends Suggester {
     Integer targetNodeIndex = null;
     Integer sourceNodeIndex = null;
     ReplicaInfo sourceReplicaInfo = null;
-    for (Pair<ReplicaInfo, Row> fromReplica : getValidReplicas(true, true, -1)) {
+    List<Pair<ReplicaInfo, Row>> validReplicas = getValidReplicas(true, true, -1);
+    validReplicas.sort(leaderLast);
+    for (Pair<ReplicaInfo, Row> fromReplica : validReplicas) {
       Row fromRow = fromReplica.second();
       ReplicaInfo replicaInfo = fromReplica.first();
       String coll = replicaInfo.getCollection();
@@ -79,6 +82,11 @@ public class MoveReplicaSuggester extends Suggester {
     }
     return null;
   }
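+  // Orders non-leader replicas ahead of leaders so that a leader is suggested
+  // for a move only when no other replica is available.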
+  static Comparator<Pair<ReplicaInfo, Row>> leaderLast = (r1, r2) ->
+      Boolean.compare(r1.first().isLeader, r2.first().isLeader);
 
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/876ecd87/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
index 930ede8..cfcd956 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
@@ -18,6 +18,7 @@
 package org.apache.solr.client.solrj.cloud.autoscaling;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Locale;
 import java.util.Map;
@@ -27,6 +28,8 @@ import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.Utils;
 
+import static org.apache.solr.common.cloud.ZkStateReader.LEADER_PROP;
+
 
 public class ReplicaInfo implements MapWriter {
 //  private final Replica replica;
@@ -34,6 +37,7 @@ public class ReplicaInfo implements MapWriter {
   private String core, collection, shard;
   private Replica.Type type;
   private String node;
+  public final boolean isLeader;
   private final Map<String, Object> variables = new HashMap<>();
 
   public ReplicaInfo(String coll, String shard, Replica r, Map<String, Object> vals) {
@@ -42,6 +46,7 @@ public class ReplicaInfo implements MapWriter {
     this.collection = coll;
     this.shard = shard;
     this.type = r.getType();
+    this.isLeader = r.getBool(LEADER_PROP, false);
     if (vals != null) {
       this.variables.putAll(vals);
     }
@@ -49,10 +54,12 @@ public class ReplicaInfo implements MapWriter {
   }
 
   public ReplicaInfo(String name, String core, String coll, String shard, Replica.Type type, String node, Map<String, Object> vals) {
+    if (vals == null) vals = Collections.emptyMap();
     this.name = name;
     if (vals != null) {
       this.variables.putAll(vals);
     }
+    this.isLeader = "true".equals(String.valueOf(vals.getOrDefault(LEADER_PROP, "false")));
     this.collection = coll;
     this.shard = shard;
     this.type = type;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/876ecd87/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
index 94a673e..93fe59a 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
@@ -16,15 +16,15 @@
  */
 package org.apache.solr.common.cloud;
 
-import org.apache.solr.common.util.Utils;
-import org.noggit.JSONUtil;
-import org.noggit.JSONWriter;
-
 import java.util.Collections;
 import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.solr.common.util.Utils;
+import org.noggit.JSONUtil;
+import org.noggit.JSONWriter;
+
 /**
  * ZkNodeProps contains generic immutable properties.
  */
@@ -148,7 +148,8 @@ public class ZkNodeProps implements JSONWriter.Writable {
 
   public boolean getBool(String key, boolean b) {
     Object o = propMap.get(key);
-    if(o==null) return b;
+    if (o == null) return b;
+    if (o instanceof Boolean) return (boolean) o;
     return Boolean.parseBoolean(o.toString());
   }
 

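The getBool change above adds a fast path for values that are already stored
as a Boolean (as the new test below does when it puts Boolean values into the
props map), avoiding a toString()/parseBoolean() round-trip. A standalone
sketch of the resulting lookup semantics, using a plain HashMap in place of
ZkNodeProps (the names here are illustrative only):

  import java.util.HashMap;
  import java.util.Map;

  public class GetBoolDemo {
    static boolean getBool(Map<String, Object> props, String key, boolean def) {
      Object o = props.get(key);
      if (o == null) return def;                      // missing key -> caller's default
      if (o instanceof Boolean) return (Boolean) o;   // stored Boolean -> no string round-trip
      return Boolean.parseBoolean(o.toString());      // "true" (any case) -> true, else false
    }

    public static void main(String[] args) {
      Map<String, Object> props = new HashMap<>();
      props.put("leader", Boolean.TRUE); // Boolean value hits the fast path
      props.put("flag", "TRUE");         // String value is parsed
      System.out.println(getBool(props, "leader", false)); // true
      System.out.println(getBool(props, "flag", false));   // true
      System.out.println(getBool(props, "missing", true)); // true (default)
    }
  }
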
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/876ecd87/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
index 2c119f3..2e509bb 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
@@ -1744,5 +1744,32 @@ public class TestPolicy extends SolrTestCaseJ4 {
         cloudManager, null, Arrays.asList("shard1", "shard2"), 1, 0, 0, null);
     assertTrue(locations.stream().allMatch(it -> "node3".equals(it.node)));
   }
+  public void testMoveReplicaLeaderLast() {
+
+    List<Pair<ReplicaInfo, Row>> validReplicas = new ArrayList<>();
+    Replica replica = new Replica("r1", Utils.makeMap("leader", "true"));
+    ReplicaInfo replicaInfo = new ReplicaInfo("c1", "s1", replica, new HashMap<>());
+    validReplicas.add(new Pair<>(replicaInfo, null));
+
+    replicaInfo = new ReplicaInfo("r4", "c1_s2_r1","c1", "s2", Replica.Type.NRT, "n1", Collections.singletonMap("leader", "true"));
+    validReplicas.add(new Pair<>(replicaInfo, null));
+
+
+    replica = new Replica("r2", Utils.makeMap("leader", false));
+    replicaInfo = new ReplicaInfo("c1", "s1", replica, new HashMap<>());
+    validReplicas.add(new Pair<>(replicaInfo, null));
+
+    replica = new Replica("r3", Utils.makeMap("leader", false));
+    replicaInfo = new ReplicaInfo("c1", "s1", replica, new HashMap<>());
+    validReplicas.add(new Pair<>(replicaInfo, null));
+
+
+    validReplicas.sort(MoveReplicaSuggester.leaderLast);
+    assertEquals("r2", validReplicas.get(0).first().getName());
+    assertEquals("r3", validReplicas.get(1).first().getName());
+    assertEquals("r1", validReplicas.get(2).first().getName());
+    assertEquals("r4", validReplicas.get(3).first().getName());
+
+  }
 
 }


[17/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to their own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
new file mode 100644
index 0000000..9529ee1
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
@@ -0,0 +1,1011 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.commons.lang.StringUtils;
+import org.apache.solr.client.solrj.SolrResponse;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
+import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
+import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
+import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
+import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.UpdateResponse;
+import org.apache.solr.cloud.LockTree;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.OverseerMessageHandler;
+import org.apache.solr.cloud.OverseerNodePrioritizer;
+import org.apache.solr.cloud.OverseerSolrResponse;
+import org.apache.solr.cloud.OverseerTaskProcessor;
+import org.apache.solr.cloud.Stats;
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.cloud.overseer.OverseerAction;
+import org.apache.solr.common.SolrCloseable;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.SuppressForbidden;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.handler.component.ShardHandlerFactory;
+import org.apache.solr.handler.component.ShardRequest;
+import org.apache.solr.handler.component.ShardResponse;
+import org.apache.solr.util.DefaultSolrThreadFactory;
+import org.apache.solr.util.RTimer;
+import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
+import static org.apache.solr.common.cloud.DocCollection.SNITCH;
+import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NODE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.common.util.Utils.makeMap;
+
+/**
+ * An {@link OverseerMessageHandler} that handles Collections API-related
+ * overseer messages.
+ */
+public class OverseerCollectionMessageHandler implements OverseerMessageHandler, SolrCloseable {
+
+  public static final String NUM_SLICES = "numShards";
+
+  public static final boolean CREATE_NODE_SET_SHUFFLE_DEFAULT = true;
+  public static final String CREATE_NODE_SET_SHUFFLE = CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM;
+  public static final String CREATE_NODE_SET_EMPTY = "EMPTY";
+  public static final String CREATE_NODE_SET = CollectionAdminParams.CREATE_NODE_SET_PARAM;
+
+  public static final String ROUTER = "router";
+
+  public static final String SHARDS_PROP = "shards";
+
+  public static final String REQUESTID = "requestid";
+
+  public static final String COLL_CONF = "collection.configName";
+
+  public static final String COLL_PROP_PREFIX = "property.";
+
+  public static final String ONLY_IF_DOWN = "onlyIfDown";
+
+  public static final String SHARD_UNIQUE = "shardUnique";
+
+  public static final String ONLY_ACTIVE_NODES = "onlyactivenodes";
+
+  static final String SKIP_CREATE_REPLICA_IN_CLUSTER_STATE = "skipCreateReplicaInClusterState";
+
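+  // Default values applied to these collection properties when a CREATE request omits them.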
+  public static final Map<String, Object> COLL_PROPS = Collections.unmodifiableMap(makeMap(
+      ROUTER, DocRouter.DEFAULT_NAME,
+      ZkStateReader.REPLICATION_FACTOR, "1",
+      ZkStateReader.NRT_REPLICAS, "1",
+      ZkStateReader.TLOG_REPLICAS, "0",
+      ZkStateReader.PULL_REPLICAS, "0",
+      ZkStateReader.MAX_SHARDS_PER_NODE, "1",
+      ZkStateReader.AUTO_ADD_REPLICAS, "false",
+      DocCollection.RULE, null,
+      POLICY, null,
+      SNITCH, null));
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  Overseer overseer;
+  ShardHandlerFactory shardHandlerFactory;
+  String adminPath;
+  ZkStateReader zkStateReader;
+  SolrCloudManager cloudManager;
+  String myId;
+  Stats stats;
+  TimeSource timeSource;
+
+  // Tracks collections that are currently being processed by a running task.
+  // This is used for handling mutual exclusion of the tasks.
+  final private LockTree lockTree = new LockTree();
+  ExecutorService tpe = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 10, 0L, TimeUnit.MILLISECONDS,
+      new SynchronousQueue<>(),
+      new DefaultSolrThreadFactory("OverseerCollectionMessageHandlerThreadFactory"));
+
+  protected static final Random RANDOM;
+  static {
+    // We try to make things reproducible in the context of our tests by initializing the random instance
+    // based on the current seed
+    String seed = System.getProperty("tests.seed");
+    if (seed == null) {
+      RANDOM = new Random();
+    } else {
+      RANDOM = new Random(seed.hashCode());
+    }
+  }
+
+  final Map<CollectionAction, Cmd> commandMap;
+
+  private volatile boolean isClosed;
+
+  public OverseerCollectionMessageHandler(ZkStateReader zkStateReader, String myId,
+                                        final ShardHandlerFactory shardHandlerFactory,
+                                        String adminPath,
+                                        Stats stats,
+                                        Overseer overseer,
+                                        OverseerNodePrioritizer overseerPrioritizer) {
+    this.zkStateReader = zkStateReader;
+    this.shardHandlerFactory = shardHandlerFactory;
+    this.adminPath = adminPath;
+    this.myId = myId;
+    this.stats = stats;
+    this.overseer = overseer;
+    this.cloudManager = overseer.getSolrCloudManager();
+    this.timeSource = cloudManager.getTimeSource();
+    this.isClosed = false;
+    commandMap = new ImmutableMap.Builder<CollectionAction, Cmd>()
+        .put(REPLACENODE, new ReplaceNodeCmd(this))
+        .put(DELETENODE, new DeleteNodeCmd(this))
+        .put(BACKUP, new BackupCmd(this))
+        .put(RESTORE, new RestoreCmd(this))
+        .put(CREATESNAPSHOT, new CreateSnapshotCmd(this))
+        .put(DELETESNAPSHOT, new DeleteSnapshotCmd(this))
+        .put(SPLITSHARD, new SplitShardCmd(this))
+        .put(ADDROLE, new OverseerRoleCmd(this, ADDROLE, overseerPrioritizer))
+        .put(REMOVEROLE, new OverseerRoleCmd(this, REMOVEROLE, overseerPrioritizer))
+        .put(MOCK_COLL_TASK, this::mockOperation)
+        .put(MOCK_SHARD_TASK, this::mockOperation)
+        .put(MOCK_REPLICA_TASK, this::mockOperation)
+        .put(MIGRATESTATEFORMAT, this::migrateStateFormat)
+        .put(CREATESHARD, new CreateShardCmd(this))
+        .put(MIGRATE, new MigrateCmd(this))
+        .put(CREATE, new CreateCollectionCmd(this))
+        .put(MODIFYCOLLECTION, this::modifyCollection)
+        .put(ADDREPLICAPROP, this::processReplicaAddPropertyCommand)
+        .put(DELETEREPLICAPROP, this::processReplicaDeletePropertyCommand)
+        .put(BALANCESHARDUNIQUE, this::balanceProperty)
+        .put(REBALANCELEADERS, this::processRebalanceLeaders)
+        .put(RELOAD, this::reloadCollection)
+        .put(DELETE, new DeleteCollectionCmd(this))
+        .put(CREATEALIAS, new CreateAliasCmd(this))
+        .put(DELETEALIAS, new DeleteAliasCmd(this))
+        .put(ROUTEDALIAS_CREATECOLL, new RoutedAliasCreateCollectionCmd(this))
+        .put(OVERSEERSTATUS, new OverseerStatusCmd(this))
+        .put(DELETESHARD, new DeleteShardCmd(this))
+        .put(DELETEREPLICA, new DeleteReplicaCmd(this))
+        .put(ADDREPLICA, new AddReplicaCmd(this))
+        .put(MOVEREPLICA, new MoveReplicaCmd(this))
+        .put(UTILIZENODE, new UtilizeNodeCmd(this))
+        .build();
+  }
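+  // Note (illustrative, not part of the original patch): Cmd is a functional interface
+  // (declared at the bottom of this class), so handlers can be registered either as
+  // command objects, as method references like this::mockOperation above, or as lambdas,
+  // e.g. a hypothetical .put(SOMEACTION, (clusterState, msg, results) -> results.add("ok", true)).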
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public SolrResponse processMessage(ZkNodeProps message, String operation) {
+    log.debug("OverseerCollectionMessageHandler.processMessage : {} , {}", operation, message);
+
+    NamedList results = new NamedList();
+    try {
+      CollectionAction action = getCollectionAction(operation);
+      Cmd command = commandMap.get(action);
+      if (command != null) {
+        command.call(cloudManager.getClusterStateProvider().getClusterState(), message, results);
+      } else {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:"
+            + operation);
+      }
+    } catch (Exception e) {
+      String collName = message.getStr("collection");
+      if (collName == null) collName = message.getStr(NAME);
+
+      if (collName == null) {
+        SolrException.log(log, "Operation " + operation + " failed", e);
+      } else  {
+        SolrException.log(log, "Collection: " + collName + " operation: " + operation
+            + " failed", e);
+      }
+
+      results.add("Operation " + operation + " caused exception:", e);
+      SimpleOrderedMap nl = new SimpleOrderedMap();
+      nl.add("msg", e.getMessage());
+      nl.add("rspCode", e instanceof SolrException ? ((SolrException)e).code() : -1);
+      results.add("exception", nl);
+    }
+    return new OverseerSolrResponse(results);
+  }
+
+  @SuppressForbidden(reason = "Needs currentTimeMillis for mock requests")
+  private void mockOperation(ClusterState state, ZkNodeProps message, NamedList results) throws InterruptedException {
+    //only for test purposes
+    Thread.sleep(message.getInt("sleep", 1));
+    log.info("MOCK_TASK_EXECUTED time {} data {}", System.currentTimeMillis(), Utils.toJSONString(message));
+    results.add("MOCK_FINISHED", System.currentTimeMillis());
+  }
+
+  private CollectionAction getCollectionAction(String operation) {
+    CollectionAction action = CollectionAction.get(operation);
+    if (action == null) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:" + operation);
+    }
+    return action;
+  }
+
+  private void reloadCollection(ClusterState clusterState, ZkNodeProps message, NamedList results) {
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set(CoreAdminParams.ACTION, CoreAdminAction.RELOAD.toString());
+
+    String asyncId = message.getStr(ASYNC);
+    Map<String, String> requestMap = null;
+    if (asyncId != null) {
+      requestMap = new HashMap<>();
+    }
+    collectionCmd(message, params, results, Replica.State.ACTIVE, asyncId, requestMap);
+  }
+
+  @SuppressWarnings("unchecked")
+  private void processRebalanceLeaders(ClusterState clusterState, ZkNodeProps message, NamedList results)
+      throws Exception {
+    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, CORE_NAME_PROP, ELECTION_NODE_PROP,
+        CORE_NODE_NAME_PROP, BASE_URL_PROP, REJOIN_AT_HEAD_PROP);
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set(COLLECTION_PROP, message.getStr(COLLECTION_PROP));
+    params.set(SHARD_ID_PROP, message.getStr(SHARD_ID_PROP));
+    params.set(REJOIN_AT_HEAD_PROP, message.getStr(REJOIN_AT_HEAD_PROP));
+    params.set(CoreAdminParams.ACTION, CoreAdminAction.REJOINLEADERELECTION.toString());
+    params.set(CORE_NAME_PROP, message.getStr(CORE_NAME_PROP));
+    params.set(CORE_NODE_NAME_PROP, message.getStr(CORE_NODE_NAME_PROP));
+    params.set(ELECTION_NODE_PROP, message.getStr(ELECTION_NODE_PROP));
+    params.set(BASE_URL_PROP, message.getStr(BASE_URL_PROP));
+
+    String baseUrl = message.getStr(BASE_URL_PROP);
+    ShardRequest sreq = new ShardRequest();
+    sreq.nodeName = message.getStr(ZkStateReader.CORE_NAME_PROP);
+    // yes, they must use the same admin handler path everywhere...
+    params.set("qt", adminPath);
+    sreq.purpose = ShardRequest.PURPOSE_PRIVATE;
+    sreq.shards = new String[] {baseUrl};
+    sreq.actualShards = sreq.shards;
+    sreq.params = params;
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+    shardHandler.submit(sreq, baseUrl, sreq.params);
+  }
+
+  @SuppressWarnings("unchecked")
+  private void processReplicaAddPropertyCommand(ClusterState clusterState, ZkNodeProps message, NamedList results)
+      throws Exception {
+    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP, PROPERTY_VALUE_PROP);
+    SolrZkClient zkClient = zkStateReader.getZkClient();
+    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
+    Map<String, Object> propMap = new HashMap<>();
+    propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICAPROP.toLower());
+    propMap.putAll(message.getProperties());
+    ZkNodeProps m = new ZkNodeProps(propMap);
+    inQueue.offer(Utils.toJSON(m));
+  }
+
+  private void processReplicaDeletePropertyCommand(ClusterState clusterState, ZkNodeProps message, NamedList results)
+      throws Exception {
+    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP);
+    SolrZkClient zkClient = zkStateReader.getZkClient();
+    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
+    Map<String, Object> propMap = new HashMap<>();
+    propMap.put(Overseer.QUEUE_OPERATION, DELETEREPLICAPROP.toLower());
+    propMap.putAll(message.getProperties());
+    ZkNodeProps m = new ZkNodeProps(propMap);
+    inQueue.offer(Utils.toJSON(m));
+  }
+
+  private void balanceProperty(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    if (StringUtils.isBlank(message.getStr(COLLECTION_PROP)) || StringUtils.isBlank(message.getStr(PROPERTY_PROP))) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "The '" + COLLECTION_PROP + "' and '" + PROPERTY_PROP +
+              "' parameters are required for the BALANCESHARDUNIQUE operation, no action taken");
+    }
+    SolrZkClient zkClient = zkStateReader.getZkClient();
+    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
+    Map<String, Object> propMap = new HashMap<>();
+    propMap.put(Overseer.QUEUE_OPERATION, BALANCESHARDUNIQUE.toLower());
+    propMap.putAll(message.getProperties());
+    inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
+  }
+
+  /**
+   * Walks the tree of collection status to verify that any replica not reporting a "down" state
+   * is on a live node; any replica reporting its state as "active" while its node is not live is
+   * marked as "down". Used by CLUSTERSTATUS.
+   * @param liveNodes List of currently live node names.
+   * @param collectionProps Map of collection status information pulled directly from ZooKeeper.
+   */
+  @SuppressWarnings("unchecked")
+  protected void crossCheckReplicaStateWithLiveNodes(List<String> liveNodes, NamedList<Object> collectionProps) {
+    Iterator<Map.Entry<String,Object>> colls = collectionProps.iterator();
+    while (colls.hasNext()) {
+      Map.Entry<String,Object> next = colls.next();
+      Map<String,Object> collMap = (Map<String,Object>)next.getValue();
+      Map<String,Object> shards = (Map<String,Object>)collMap.get("shards");
+      for (Object nextShard : shards.values()) {
+        Map<String,Object> shardMap = (Map<String,Object>)nextShard;
+        Map<String,Object> replicas = (Map<String,Object>)shardMap.get("replicas");
+        for (Object nextReplica : replicas.values()) {
+          Map<String,Object> replicaMap = (Map<String,Object>)nextReplica;
+          if (Replica.State.getState((String) replicaMap.get(ZkStateReader.STATE_PROP)) != Replica.State.DOWN) {
+            // not down, so verify the node is live
+            String node_name = (String)replicaMap.get(ZkStateReader.NODE_NAME_PROP);
+            if (!liveNodes.contains(node_name)) {
+              // node is not live, so this replica is actually down
+              replicaMap.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
+            }
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Get collection status from cluster state.
+   * Can optionally restrict the status to a given set of shard names.
+   *
+   * @param collection collection map parsed from JSON-serialized {@link ClusterState}
+   * @param name collection name
+   * @param requestedShards a set of shards to be returned in the status.
+   *                        An empty or null value indicates <b>all</b> shards.
+   * @return map of collection properties
+   */
+  @SuppressWarnings("unchecked")
+  private Map<String, Object> getCollectionStatus(Map<String, Object> collection, String name, Set<String> requestedShards) {
+    if (collection == null)  {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + name + " not found");
+    }
+    if (requestedShards == null || requestedShards.isEmpty()) {
+      return collection;
+    } else {
+      Map<String, Object> shards = (Map<String, Object>) collection.get("shards");
+      Map<String, Object>  selected = new HashMap<>();
+      for (String selectedShard : requestedShards) {
+        if (!shards.containsKey(selectedShard)) {
+          throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + name + " shard: " + selectedShard + " not found");
+        }
+        selected.put(selectedShard, shards.get(selectedShard));
+        collection.put("shards", selected);
+      }
+      return collection;
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
+      throws Exception {
+    ((DeleteReplicaCmd) commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, message, results, onComplete);
+  }
+
+  boolean waitForCoreNodeGone(String collectionName, String shard, String replicaName, int timeoutms) throws InterruptedException {
+    TimeOut timeout = new TimeOut(timeoutms, TimeUnit.MILLISECONDS, timeSource);
+    while (! timeout.hasTimedOut()) {
+      timeout.sleep(100);
+      DocCollection docCollection = zkStateReader.getClusterState().getCollection(collectionName);
+      if (docCollection == null) { // someone already deleted the collection
+        return true;
+      }
+      Slice slice = docCollection.getSlice(shard);
+      if(slice == null || slice.getReplica(replicaName) == null) {
+        return true;
+      }
+    }
+    // replica still exists after the timeout
+    return false;
+  }
+
+  void deleteCoreNode(String collectionName, String replicaName, Replica replica, String core) throws Exception {
+    ZkNodeProps m = new ZkNodeProps(
+        Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),
+        ZkStateReader.CORE_NAME_PROP, core,
+        ZkStateReader.NODE_NAME_PROP, replica.getStr(ZkStateReader.NODE_NAME_PROP),
+        ZkStateReader.COLLECTION_PROP, collectionName,
+        ZkStateReader.CORE_NODE_NAME_PROP, replicaName,
+        ZkStateReader.BASE_URL_PROP, replica.getStr(ZkStateReader.BASE_URL_PROP));
+    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
+  }
+
+  void checkRequired(ZkNodeProps message, String... props) {
+    for (String prop : props) {
+      if (message.get(prop) == null) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, StrUtils.join(Arrays.asList(props), ',') + " are required params");
+      }
+    }
+  }
+
+  // TODO: should we remove this in the next release?
+  private void migrateStateFormat(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    final String collectionName = message.getStr(COLLECTION_PROP);
+
+    boolean firstLoop = true;
+    // wait for a while until the state format changes
+    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+    while (! timeout.hasTimedOut()) {
+      DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
+      if (collection == null) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + collectionName + " not found");
+      }
+      if (collection.getStateFormat() == 2) {
+        // Done.
+        results.add("success", new SimpleOrderedMap<>());
+        return;
+      }
+
+      if (firstLoop) {
+        // Actually queue the migration command.
+        firstLoop = false;
+        ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, MIGRATESTATEFORMAT.toLower(), COLLECTION_PROP, collectionName);
+        Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
+      }
+      timeout.sleep(100);
+    }
+    throw new SolrException(ErrorCode.SERVER_ERROR, "Could not migrate state format for collection: " + collectionName);
+  }
+
+  void commit(NamedList results, String slice, Replica parentShardLeader) {
+    log.debug("Calling soft commit to make sub shard updates visible");
+    String coreUrl = new ZkCoreNodeProps(parentShardLeader).getCoreUrl();
+    // HttpShardHandler is hard coded to send a QueryRequest hence we go direct
+    // and we force open a searcher so that we have documents to show upon switching states
+    UpdateResponse updateResponse = null;
+    try {
+      updateResponse = softCommit(coreUrl);
+      processResponse(results, null, coreUrl, updateResponse, slice, Collections.emptySet());
+    } catch (Exception e) {
+      processResponse(results, e, coreUrl, updateResponse, slice, Collections.emptySet());
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to call distrib softCommit on: " + coreUrl, e);
+    }
+  }
+
+
+  static UpdateResponse softCommit(String url) throws SolrServerException, IOException {
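+    // Illustrative usage (not part of the original patch):
+    //   softCommit("http://host:8983/solr/collection1_shard1_replica_n1");
+    // The flags below appear to request waitFlush=false, waitSearcher=true, softCommit=true,
+    // which opens a new searcher without forcing a hard commit.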
+
+    try (HttpSolrClient client = new HttpSolrClient.Builder(url)
+        .withConnectionTimeout(30000)
+        .withSocketTimeout(120000)
+        .build()) {
+      UpdateRequest ureq = new UpdateRequest();
+      ureq.setParams(new ModifiableSolrParams());
+      ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true, true);
+      return ureq.process(client);
+    }
+  }
+
+  String waitForCoreNodeName(String collectionName, String msgNodeName, String msgCore) {
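+    // Polls the cluster state (1s interval, up to 320 attempts) until a replica matching the
+    // given node name and core name appears, then returns its coreNodeName.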
+    int retryCount = 320;
+    while (retryCount-- > 0) {
+      final DocCollection docCollection = zkStateReader.getClusterState().getCollectionOrNull(collectionName);
+      if (docCollection != null && docCollection.getSlicesMap() != null) {
+        Map<String,Slice> slicesMap = docCollection.getSlicesMap();
+        for (Slice slice : slicesMap.values()) {
+          for (Replica replica : slice.getReplicas()) {
+            // TODO: for really large clusters, we could 'index' on this
+
+            String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
+            String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+
+            if (nodeName.equals(msgNodeName) && core.equals(msgCore)) {
+              return replica.getName();
+            }
+          }
+        }
+      }
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+      }
+    }
+    throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find coreNodeName");
+  }
+
+  void waitForNewShard(String collectionName, String sliceName) throws KeeperException, InterruptedException {
+    log.debug("Waiting for slice {} of collection {} to be available", sliceName, collectionName);
+    RTimer timer = new RTimer();
+    int retryCount = 320;
+    while (retryCount-- > 0) {
+      DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
+      if (collection == null) {
+        throw new SolrException(ErrorCode.SERVER_ERROR,
+            "Unable to find collection: " + collectionName + " in clusterstate");
+      }
+      Slice slice = collection.getSlice(sliceName);
+      if (slice != null) {
+        log.debug("Waited for {}ms for slice {} of collection {} to be available",
+            timer.getTime(), sliceName, collectionName);
+        return;
+      }
+      Thread.sleep(1000);
+    }
+    throw new SolrException(ErrorCode.SERVER_ERROR,
+        "Could not find new slice " + sliceName + " in collection " + collectionName
+            + " even after waiting for " + timer.getTime() + "ms"
+    );
+  }
+
+  DocRouter.Range intersect(DocRouter.Range a, DocRouter.Range b) {
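+    // Worked example (illustrative): for a=[0,100] and b=[50,200] the ranges overlap,
+    // neither is a subset, and b.includes(a.max), so the result is [b.min, a.max] = [50,100].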
+    if (a == null || b == null || !a.overlaps(b)) {
+      return null;
+    } else if (a.isSubsetOf(b))
+      return a;
+    else if (b.isSubsetOf(a))
+      return b;
+    else if (b.includes(a.max)) {
+      return new DocRouter.Range(b.min, a.max);
+    } else  {
+      return new DocRouter.Range(a.min, b.max);
+    }
+  }
+
+  void sendShardRequest(String nodeName, ModifiableSolrParams params,
+                        ShardHandler shardHandler, String asyncId,
+                        Map<String, String> requestMap) {
+    sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap, adminPath, zkStateReader);
+
+  }
+
+  public static void sendShardRequest(String nodeName, ModifiableSolrParams params, ShardHandler shardHandler,
+                                      String asyncId, Map<String, String> requestMap, String adminPath,
+                                      ZkStateReader zkStateReader) {
+    if (asyncId != null) {
+      String coreAdminAsyncId = asyncId + Math.abs(System.nanoTime());
+      params.set(ASYNC, coreAdminAsyncId);
+      requestMap.put(nodeName, coreAdminAsyncId);
+    }
+
+    ShardRequest sreq = new ShardRequest();
+    params.set("qt", adminPath);
+    sreq.purpose = 1;
+    String replica = zkStateReader.getBaseUrlForNodeName(nodeName);
+    sreq.shards = new String[]{replica};
+    sreq.actualShards = sreq.shards;
+    sreq.nodeName = nodeName;
+    sreq.params = params;
+
+    shardHandler.submit(sreq, replica, sreq.params);
+  }
+
+  void addPropertyParams(ZkNodeProps message, ModifiableSolrParams params) {
+    // Now add the property.key=value pairs
+    for (String key : message.keySet()) {
+      if (key.startsWith(COLL_PROP_PREFIX)) {
+        params.set(key, message.getStr(key));
+      }
+    }
+  }
+
+  void addPropertyParams(ZkNodeProps message, Map<String, Object> map) {
+    // Now add the property.key=value pairs
+    for (String key : message.keySet()) {
+      if (key.startsWith(COLL_PROP_PREFIX)) {
+        map.put(key, message.getStr(key));
+      }
+    }
+  }
+
+
+  private void modifyCollection(ClusterState clusterState, ZkNodeProps message, NamedList results)
+      throws Exception {
+    
+    final String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
+    //the rest of the processing is based on writing cluster state properties;
+    //remove the configName property here so it cannot cause errors further down the pipeline
+    String configName = (String) message.getProperties().remove(COLL_CONF);
+    
+    if(configName != null) {
+      validateConfigOrThrowSolrException(configName);
+      
+      boolean isLegacyCloud =  Overseer.isLegacy(zkStateReader);
+      createConfNode(cloudManager.getDistribStateManager(), configName, collectionName, isLegacyCloud);
+      reloadCollection(null, new ZkNodeProps(NAME, collectionName), results);
+    }
+    
+    overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
+
+    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+    boolean areChangesVisible = true;
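+    // Poll (up to 30 seconds) until every modified property is visible in the published
+    // cluster state; COLLECTION_PROP and QUEUE_OPERATION are bookkeeping keys, not updates.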
+    while (!timeout.hasTimedOut()) {
+      DocCollection collection = cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName);
+      areChangesVisible = true;
+      for (Map.Entry<String,Object> updateEntry : message.getProperties().entrySet()) {
+        String updateKey = updateEntry.getKey();
+        if (!updateKey.equals(ZkStateReader.COLLECTION_PROP)
+            && !updateKey.equals(Overseer.QUEUE_OPERATION)
+            && !collection.get(updateKey).equals(updateEntry.getValue())){
+          areChangesVisible = false;
+          break;
+        }
+      }
+      if (areChangesVisible) break;
+      timeout.sleep(100);
+    }
+
+    if (!areChangesVisible)
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not modify collection " + message);
+  }
+
+  void cleanupCollection(String collectionName, NamedList results) throws Exception {
+    log.error("Cleaning up collection [" + collectionName + "].");
+    Map<String, Object> props = makeMap(
+        Overseer.QUEUE_OPERATION, DELETE.toLower(),
+        NAME, collectionName);
+    commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
+  }
+
+  Map<String, Replica> waitToSeeReplicasInState(String collectionName, Collection<String> coreNames) throws InterruptedException {
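+    // Polls the cluster state every 100ms (up to 30 seconds) until each requested core name
+    // is visible as a replica of the collection.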
+    Map<String, Replica> result = new HashMap<>();
+    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+    while (true) {
+      DocCollection coll = zkStateReader.getClusterState().getCollection(collectionName);
+      for (String coreName : coreNames) {
+        if (result.containsKey(coreName)) continue;
+        for (Slice slice : coll.getSlices()) {
+          for (Replica replica : slice.getReplicas()) {
+            if (coreName.equals(replica.getStr(ZkStateReader.CORE_NAME_PROP))) {
+              result.put(coreName, replica);
+              break;
+            }
+          }
+        }
+      }
+      
+      if (result.size() == coreNames.size()) {
+        return result;
+      } else {
+        log.debug("Expecting {} cores but found {}", coreNames, result);
+      }
+      if (timeout.hasTimedOut()) {
+        throw new SolrException(ErrorCode.SERVER_ERROR, "Timed out waiting to see all replicas: " + coreNames + " in cluster state. Last state: " + coll);
+      }
+      
+      Thread.sleep(100);
+    }
+  }
+
+  ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
+      throws Exception {
+
+    return ((AddReplicaCmd) commandMap.get(ADDREPLICA)).addReplica(clusterState, message, results, onComplete);
+  }
+
+  void processResponses(NamedList results, ShardHandler shardHandler, boolean abortOnError, String msgOnError,
+                        String asyncId, Map<String, String> requestMap) {
+    processResponses(results, shardHandler, abortOnError, msgOnError, asyncId, requestMap, Collections.emptySet());
+  }
+
+  void processResponses(NamedList results, ShardHandler shardHandler, boolean abortOnError, String msgOnError,
+                                String asyncId, Map<String, String> requestMap, Set<String> okayExceptions) {
+    //Processes all shard responses
+    ShardResponse srsp;
+    do {
+      srsp = shardHandler.takeCompletedOrError();
+      if (srsp != null) {
+        processResponse(results, srsp, okayExceptions);
+        Throwable exception = srsp.getException();
+        if (abortOnError && exception != null)  {
+          // drain pending requests
+          while (srsp != null)  {
+            srsp = shardHandler.takeCompletedOrError();
+          }
+          throw new SolrException(ErrorCode.SERVER_ERROR, msgOnError, exception);
+        }
+      }
+    } while (srsp != null);
+
+    //If the request is async, wait for the core admin calls to complete before returning
+    if (asyncId != null) {
+      waitForAsyncCallsToComplete(requestMap, results);
+      requestMap.clear();
+    }
+  }
+
+
+  void validateConfigOrThrowSolrException(String configName) throws IOException, KeeperException, InterruptedException {
+    boolean isValid = cloudManager.getDistribStateManager().hasData(ZkConfigManager.CONFIGS_ZKNODE + "/" + configName);
+    if (!isValid) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot find the specified config set: " + configName);
+    }
+  }
+
+  /**
+   * This doesn't validate the config (path) itself and is just responsible for creating the confNode.
+   * That check should be done before the config node is created.
+   */
+  public static void createConfNode(DistribStateManager stateManager, String configName, String coll, boolean isLegacyCloud) throws IOException, AlreadyExistsException, BadVersionException, KeeperException, InterruptedException {
+    
+    if (configName != null) {
+      String collDir = ZkStateReader.COLLECTIONS_ZKNODE + "/" + coll;
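+      // Resulting znode (illustrative): /collections/<coll> holding {"configName":"<configName>"}.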
+      log.debug("creating collections conf node {} ", collDir);
+      byte[] data = Utils.toJSON(makeMap(ZkController.CONFIGNAME_PROP, configName));
+      if (stateManager.hasData(collDir)) {
+        stateManager.setData(collDir, data, -1);
+      } else {
+        stateManager.makePath(collDir, data, CreateMode.PERSISTENT, false);
+      }
+    } else {
+      if(isLegacyCloud){
+        log.warn("Could not obtain config name");
+      } else {
+        throw new SolrException(ErrorCode.BAD_REQUEST,"Unable to get config name");
+      }
+    }
+  }
+  
+  private void collectionCmd(ZkNodeProps message, ModifiableSolrParams params,
+                             NamedList results, Replica.State stateMatcher, String asyncId, Map<String, String> requestMap) {
+    collectionCmd( message, params, results, stateMatcher, asyncId, requestMap, Collections.emptySet());
+  }
+
+
+  void collectionCmd(ZkNodeProps message, ModifiableSolrParams params,
+                     NamedList results, Replica.State stateMatcher, String asyncId, Map<String, String> requestMap, Set<String> okayExceptions) {
+    log.info("Executing Collection Cmd : " + params);
+    String collectionName = message.getStr(NAME);
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+
+    ClusterState clusterState = zkStateReader.getClusterState();
+    DocCollection coll = clusterState.getCollection(collectionName);
+    
+    for (Slice slice : coll.getSlices()) {
+      sliceCmd(clusterState, params, stateMatcher, slice, shardHandler, asyncId, requestMap);
+    }
+
+    processResponses(results, shardHandler, false, null, asyncId, requestMap, okayExceptions);
+
+  }
+
+  void sliceCmd(ClusterState clusterState, ModifiableSolrParams params, Replica.State stateMatcher,
+                Slice slice, ShardHandler shardHandler, String asyncId, Map<String, String> requestMap) {
+
+    for (Replica replica : slice.getReplicas()) {
+      if (clusterState.liveNodesContain(replica.getStr(ZkStateReader.NODE_NAME_PROP))
+          && (stateMatcher == null || Replica.State.getState(replica.getStr(ZkStateReader.STATE_PROP)) == stateMatcher)) {
+
+        // For thread safety, make a simple clone of the ModifiableSolrParams
+        ModifiableSolrParams cloneParams = new ModifiableSolrParams();
+        cloneParams.add(params);
+        cloneParams.set(CoreAdminParams.CORE, replica.getStr(ZkStateReader.CORE_NAME_PROP));
+
+        sendShardRequest(replica.getStr(ZkStateReader.NODE_NAME_PROP), cloneParams, shardHandler, asyncId, requestMap);
+      }
+    }
+  }
+  
+  private void processResponse(NamedList results, ShardResponse srsp, Set<String> okayExceptions) {
+    Throwable e = srsp.getException();
+    String nodeName = srsp.getNodeName();
+    SolrResponse solrResponse = srsp.getSolrResponse();
+    String shard = srsp.getShard();
+
+    processResponse(results, e, nodeName, solrResponse, shard, okayExceptions);
+  }
+
+  @SuppressWarnings("unchecked")
+  private void processResponse(NamedList results, Throwable e, String nodeName, SolrResponse solrResponse, String shard, Set<String> okayExceptions) {
+    String rootThrowable = null;
+    if (e instanceof RemoteSolrException) {
+      rootThrowable = ((RemoteSolrException) e).getRootThrowable();
+    }
+
+    if (e != null && (rootThrowable == null || !okayExceptions.contains(rootThrowable))) {
+      log.error("Error from shard: " + shard, e);
+
+      SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
+      if (failure == null) {
+        failure = new SimpleOrderedMap();
+        results.add("failure", failure);
+      }
+
+      failure.add(nodeName, e.getClass().getName() + ":" + e.getMessage());
+
+    } else {
+
+      SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
+      if (success == null) {
+        success = new SimpleOrderedMap();
+        results.add("success", success);
+      }
+
+      success.add(nodeName, solrResponse.getResponse());
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private void waitForAsyncCallsToComplete(Map<String, String> requestMap, NamedList results) {
+    for (String k:requestMap.keySet()) {
+      log.debug("Waiting for: {}/{}", k, requestMap.get(k));
+      results.add(requestMap.get(k), waitForCoreAdminAsyncCallToComplete(k, requestMap.get(k)));
+    }
+  }
+
+  private NamedList waitForCoreAdminAsyncCallToComplete(String nodeName, String requestId) {
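+    // Polls REQUESTSTATUS on the target node: "running" -> sleep 1s and keep waiting,
+    // "completed"/"failed" -> return the response, "notfound" -> retry up to 5 times.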
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set(CoreAdminParams.ACTION, CoreAdminAction.REQUESTSTATUS.toString());
+    params.set(CoreAdminParams.REQUESTID, requestId);
+    int counter = 0;
+    ShardRequest sreq;
+    do {
+      sreq = new ShardRequest();
+      params.set("qt", adminPath);
+      sreq.purpose = 1;
+      String replica = zkStateReader.getBaseUrlForNodeName(nodeName);
+      sreq.shards = new String[] {replica};
+      sreq.actualShards = sreq.shards;
+      sreq.params = params;
+
+      shardHandler.submit(sreq, replica, sreq.params);
+
+      ShardResponse srsp;
+      do {
+        srsp = shardHandler.takeCompletedOrError();
+        if (srsp != null) {
+          NamedList results = new NamedList();
+          processResponse(results, srsp, Collections.emptySet());
+          if (srsp.getSolrResponse().getResponse() == null) {
+            NamedList response = new NamedList();
+            response.add("STATUS", "failed");
+            return response;
+          }
+          
+          String r = (String) srsp.getSolrResponse().getResponse().get("STATUS");
+          if (r.equals("running")) {
+            log.debug("The task is still RUNNING, continuing to wait.");
+            try {
+              Thread.sleep(1000);
+            } catch (InterruptedException e) {
+              Thread.currentThread().interrupt();
+            }
+            continue;
+
+          } else if (r.equals("completed")) {
+            log.debug("The task is COMPLETED, returning");
+            return srsp.getSolrResponse().getResponse();
+          } else if (r.equals("failed")) {
+            // TODO: Improve this. Get more information.
+            log.debug("The task is FAILED, returning");
+            return srsp.getSolrResponse().getResponse();
+          } else if (r.equals("notfound")) {
+            log.debug("The task was not found, retrying");
+            if (counter++ < 5) {
+              try {
+                Thread.sleep(1000);
+              } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+              }
+              break;
+            }
+            throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid status request for requestId: " + requestId + ", status: " + srsp.getSolrResponse().getResponse().get("STATUS") +
+                ", retried " + counter + " times");
+          } else {
+            throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid status request " + srsp.getSolrResponse().getResponse().get("STATUS"));
+          }
+        }
+      } while (srsp != null);
+    } while(true);
+  }
+
+  @Override
+  public String getName() {
+    return "Overseer Collection Message Handler";
+  }
+
+  @Override
+  public String getTimerName(String operation) {
+    return "collection_" + operation;
+  }
+
+  @Override
+  public String getTaskKey(ZkNodeProps message) {
+    return message.containsKey(COLLECTION_PROP) ?
+      message.getStr(COLLECTION_PROP) : message.getStr(NAME);
+  }
+
+
+  private long sessionId = -1;
+  private LockTree.Session lockSession;
+
+  @Override
+  public Lock lockTask(ZkNodeProps message, OverseerTaskProcessor.TaskBatch taskBatch) {
+    if (lockSession == null || sessionId != taskBatch.getId()) {
+      //this is always called in the same thread.
+      //Each batch is supposed to have a new taskBatch,
+      //so if the taskBatch changes we must create a new Session.
+      //Also check whether the running tasks are empty; if so, clear the lockTree
+      //to ensure that locks are not 'leaked'.
+      if(taskBatch.getRunningTasks() == 0) lockTree.clear();
+      lockSession = lockTree.getSession();
+    }
+    return lockSession.lock(getCollectionAction(message.getStr(Overseer.QUEUE_OPERATION)),
+        Arrays.asList(
+            getTaskKey(message),
+            message.getStr(ZkStateReader.SHARD_ID_PROP),
+            message.getStr(ZkStateReader.REPLICA_PROP)));
+  }
+
+
+  @Override
+  public void close() throws IOException {
+    this.isClosed = true;
+    if (tpe != null) {
+      if (!tpe.isShutdown()) {
+        ExecutorUtil.shutdownAndAwaitTermination(tpe);
+      }
+    }
+  }
+
+  @Override
+  public boolean isClosed() {
+    return isClosed;
+  }
+
+  protected interface Cmd {
+    void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception;
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerRoleCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerRoleCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerRoleCmd.java
new file mode 100644
index 0000000..16f9327
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerRoleCmd.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.solr.cloud.OverseerNodePrioritizer;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Utils;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.REMOVEROLE;
+
+public class OverseerRoleCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+  private final CollectionAction operation;
+  private final OverseerNodePrioritizer overseerPrioritizer;
+
+
+
+  public OverseerRoleCmd(OverseerCollectionMessageHandler ocmh, CollectionAction operation, OverseerNodePrioritizer prioritizer) {
+    this.ocmh = ocmh;
+    this.operation = operation;
+    this.overseerPrioritizer = prioritizer;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    SolrZkClient zkClient = zkStateReader.getZkClient();
+    Map roles = null;
+    String node = message.getStr("node");
+
+    String roleName = message.getStr("role");
+    boolean nodeExists = zkClient.exists(ZkStateReader.ROLES, true);
+    if (nodeExists) {
+      roles = (Map) Utils.fromJSON(zkClient.getData(ZkStateReader.ROLES, null, new Stat(), true));
+    } else {
+      roles = new LinkedHashMap(1);
+    }
+
+    List nodeList = (List) roles.get(roleName);
+    if (nodeList == null) roles.put(roleName, nodeList = new ArrayList());
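+    // Illustrative /roles.json content after an ADDROLE: {"overseer":["host1:8983_solr"]}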
+    if (ADDROLE == operation) {
+      log.info("Overseer role added to {}", node);
+      if (!nodeList.contains(node)) nodeList.add(node);
+    } else if (REMOVEROLE == operation) {
+      log.info("Overseer role removed from {}", node);
+      nodeList.remove(node);
+    }
+
+    if (nodeExists) {
+      zkClient.setData(ZkStateReader.ROLES, Utils.toJSON(roles), true);
+    } else {
+      zkClient.create(ZkStateReader.ROLES, Utils.toJSON(roles), CreateMode.PERSISTENT, true);
+    }
+    //if there are too many nodes this command may time out, and dedicated overseers
+    //are most likely created when there are many nodes, so do this operation in a separate thread
+    new Thread(() -> {
+      try {
+        overseerPrioritizer.prioritizeOverseerNodes(ocmh.myId);
+      } catch (Exception e) {
+        log.error("Error in prioritizing Overseer", e);
+      }
+
+    }).start();
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java
new file mode 100644
index 0000000..6f0bbfd
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import com.codahale.metrics.Timer;
+import org.apache.solr.cloud.OverseerTaskProcessor;
+import org.apache.solr.cloud.Stats;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.util.stats.MetricUtils;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class OverseerStatusCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public OverseerStatusCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    String leaderNode = OverseerTaskProcessor.getLeaderNode(zkStateReader.getZkClient());
+    results.add("leader", leaderNode);
+    Stat stat = new Stat();
+    zkStateReader.getZkClient().getData("/overseer/queue",null, stat, true);
+    results.add("overseer_queue_size", stat.getNumChildren());
+    stat = new Stat();
+    zkStateReader.getZkClient().getData("/overseer/queue-work",null, stat, true);
+    results.add("overseer_work_queue_size", stat.getNumChildren());
+    stat = new Stat();
+    zkStateReader.getZkClient().getData("/overseer/collection-queue-work",null, stat, true);
+    results.add("overseer_collection_queue_size", stat.getNumChildren());
+
+    NamedList overseerStats = new NamedList();
+    NamedList collectionStats = new NamedList();
+    NamedList stateUpdateQueueStats = new NamedList();
+    NamedList workQueueStats = new NamedList();
+    NamedList collectionQueueStats = new NamedList();
+    Stats stats = ocmh.stats;
+    for (Map.Entry<String, Stats.Stat> entry : stats.getStats().entrySet()) {
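+      // The substring offsets below strip the matched key prefix, e.g. "collection_" is 11 chars.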
+      String key = entry.getKey();
+      NamedList<Object> lst = new SimpleOrderedMap<>();
+      if (key.startsWith("collection_"))  {
+        collectionStats.add(key.substring(11), lst);
+        int successes = stats.getSuccessCount(entry.getKey());
+        int errors = stats.getErrorCount(entry.getKey());
+        lst.add("requests", successes);
+        lst.add("errors", errors);
+        List<Stats.FailedOp> failureDetails = stats.getFailureDetails(key);
+        if (failureDetails != null) {
+          List<SimpleOrderedMap<Object>> failures = new ArrayList<>();
+          for (Stats.FailedOp failedOp : failureDetails) {
+            SimpleOrderedMap<Object> fail = new SimpleOrderedMap<>();
+            fail.add("request", failedOp.req.getProperties());
+            fail.add("response", failedOp.resp.getResponse());
+            failures.add(fail);
+          }
+          lst.add("recent_failures", failures);
+        }
+      } else if (key.startsWith("/overseer/queue_"))  {
+        stateUpdateQueueStats.add(key.substring(16), lst);
+      } else if (key.startsWith("/overseer/queue-work_"))  {
+        workQueueStats.add(key.substring(21), lst);
+      } else if (key.startsWith("/overseer/collection-queue-work_"))  {
+        collectionQueueStats.add(key.substring(32), lst);
+      } else  {
+        // overseer stats
+        overseerStats.add(key, lst);
+        int successes = stats.getSuccessCount(entry.getKey());
+        int errors = stats.getErrorCount(entry.getKey());
+        lst.add("requests", successes);
+        lst.add("errors", errors);
+      }
+      Timer timer = entry.getValue().requestTime;
+      MetricUtils.addMetrics(lst, timer);
+    }
+    results.add("overseer_operations", overseerStats);
+    results.add("collection_operations", collectionStats);
+    results.add("overseer_queue", stateUpdateQueueStats);
+    results.add("overseer_internal_queue", workQueueStats);
+    results.add("collection_queue", collectionQueueStats);
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
new file mode 100644
index 0000000..35d2379
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
@@ -0,0 +1,227 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.solr.cloud.ActiveReplicaWatcher;
+import org.apache.solr.common.SolrCloseableLatch;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CollectionStateWatcher;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public ReplaceNodeCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    String source = message.getStr(CollectionParams.SOURCE_NODE, message.getStr("source"));
+    String target = message.getStr(CollectionParams.TARGET_NODE, message.getStr("target"));
+    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
+    if (source == null || target == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "sourceNode and targetNode are required params" );
+    }
+    String async = message.getStr("async");
+    int timeout = message.getInt("timeout", 10 * 60); // 10 minutes
+    boolean parallel = message.getBool("parallel", false);
+    ClusterState clusterState = zkStateReader.getClusterState();
+
+    if (!clusterState.liveNodesContain(source)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + source + " is not live");
+    }
+    if (!clusterState.liveNodesContain(target)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target Node: " + target + " is not live");
+    }
+    List<ZkNodeProps> sourceReplicas = getReplicasOfNode(source, clusterState);
+    // how many leaders are we moving? for these replicas we have to make sure that either:
+    // * another existing replica can become a leader, or
+    // * we wait until the newly created replica completes recovery (and can become the new leader)
+    // If waitForFinalState=true we wait for all replicas
+    int numLeaders = 0;
+    for (ZkNodeProps props : sourceReplicas) {
+      if (props.getBool(ZkStateReader.LEADER_PROP, false) || waitForFinalState) {
+        numLeaders++;
+      }
+    }
+    // map of collectionName_coreNodeName to watchers
+    Map<String, CollectionStateWatcher> watchers = new HashMap<>();
+    List<ZkNodeProps> createdReplicas = new ArrayList<>();
+
+    AtomicBoolean anyOneFailed = new AtomicBoolean(false);
+    SolrCloseableLatch countDownLatch = new SolrCloseableLatch(sourceReplicas.size(), ocmh);
+
+    SolrCloseableLatch replicasToRecover = new SolrCloseableLatch(numLeaders, ocmh);
+
+    for (ZkNodeProps sourceReplica : sourceReplicas) {
+      NamedList nl = new NamedList();
+      log.info("Going to create replica for collection={} shard={} on node={}", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
+      ZkNodeProps msg = sourceReplica.plus("parallel", String.valueOf(parallel)).plus(CoreAdminParams.NODE, target);
+      if (async != null) msg.getProperties().put(ASYNC, async);
+      final ZkNodeProps addedReplica = ocmh.addReplica(clusterState,
+          msg, nl, () -> {
+            countDownLatch.countDown();
+            if (nl.get("failure") != null) {
+              String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
+                  " on node=%s", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
+              log.warn(errorString);
+              // one replica creation failed. Make the best attempt to
+              // delete all the replicas created so far in the target
+              // and exit
+              synchronized (results) {
+                results.add("failure", errorString);
+                anyOneFailed.set(true);
+              }
+            } else {
+              log.debug("Successfully created replica for collection={} shard={} on node={}",
+                  sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
+            }
+          });
+
+      if (addedReplica != null) {
+        createdReplicas.add(addedReplica);
+        if (sourceReplica.getBool(ZkStateReader.LEADER_PROP, false) || waitForFinalState) {
+          String shardName = sourceReplica.getStr(SHARD_ID_PROP);
+          String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
+          String collectionName = sourceReplica.getStr(COLLECTION_PROP);
+          String key = collectionName + "_" + replicaName;
+          CollectionStateWatcher watcher;
+          if (waitForFinalState) {
+            watcher = new ActiveReplicaWatcher(collectionName, null,
+                Collections.singletonList(addedReplica.getStr(ZkStateReader.CORE_NAME_PROP)), replicasToRecover);
+          } else {
+            watcher = new LeaderRecoveryWatcher(collectionName, shardName, replicaName,
+                addedReplica.getStr(ZkStateReader.CORE_NAME_PROP), replicasToRecover);
+          }
+          watchers.put(key, watcher);
+          log.debug("--- adding " + key + ", " + watcher);
+          zkStateReader.registerCollectionStateWatcher(collectionName, watcher);
+        } else {
+          log.debug("--- not waiting for " + addedReplica);
+        }
+      }
+    }
+
+    log.debug("Waiting for replicas to be added");
+    if (!countDownLatch.await(timeout, TimeUnit.SECONDS)) {
+      log.info("Timed out waiting for replicas to be added");
+      anyOneFailed.set(true);
+    } else {
+      log.debug("Finished waiting for replicas to be added");
+    }
+
+    // now wait for leader replicas to recover
+    log.debug("Waiting for " + numLeaders + " leader replicas to recover");
+    if (!replicasToRecover.await(timeout, TimeUnit.SECONDS)) {
+      log.info("Timed out waiting for " + replicasToRecover.getCount() + " leader replicas to recover");
+      anyOneFailed.set(true);
+    } else {
+      log.debug("Finished waiting for leader replicas to recover");
+    }
+    // remove the watchers, we're done either way
+    for (Map.Entry<String, CollectionStateWatcher> e : watchers.entrySet()) {
+      zkStateReader.removeCollectionStateWatcher(e.getKey(), e.getValue());
+    }
+    if (anyOneFailed.get()) {
+      log.info("Failed to create some replicas. Cleaning up all replicas on target node");
+      SolrCloseableLatch cleanupLatch = new SolrCloseableLatch(createdReplicas.size(), ocmh);
+      for (ZkNodeProps createdReplica : createdReplicas) {
+        NamedList deleteResult = new NamedList();
+        try {
+          ocmh.deleteReplica(zkStateReader.getClusterState(), createdReplica.plus("parallel", "true"), deleteResult, () -> {
+            cleanupLatch.countDown();
+            if (deleteResult.get("failure") != null) {
+              synchronized (results) {
+                results.add("failure", "Could not cleanup, because of : " + deleteResult.get("failure"));
+              }
+            }
+          });
+        } catch (KeeperException e) {
+          cleanupLatch.countDown();
+          log.warn("Error deleting replica ", e);
+        } catch (Exception e) {
+          log.warn("Error deleting replica ", e);
+          cleanupLatch.countDown();
+          throw e;
+        }
+      }
+      cleanupLatch.await(5, TimeUnit.MINUTES);
+      return;
+    }
+
+
+    // reaching this far means all replicas could be recreated;
+    // now clean up the replicas on the source node
+    DeleteNodeCmd.cleanupReplicas(results, state, sourceReplicas, ocmh, source, async);
+    results.add("success", "REPLACENODE action completed successfully from: " + source + " to: " + target);
+  }
+
+  static List<ZkNodeProps> getReplicasOfNode(String source, ClusterState state) {
+    List<ZkNodeProps> sourceReplicas = new ArrayList<>();
+    for (Map.Entry<String, DocCollection> e : state.getCollectionsMap().entrySet()) {
+      for (Slice slice : e.getValue().getSlices()) {
+        for (Replica replica : slice.getReplicas()) {
+          if (source.equals(replica.getNodeName())) {
+            ZkNodeProps props = new ZkNodeProps(
+                COLLECTION_PROP, e.getKey(),
+                SHARD_ID_PROP, slice.getName(),
+                ZkStateReader.CORE_NAME_PROP, replica.getCoreName(),
+                ZkStateReader.REPLICA_PROP, replica.getName(),
+                ZkStateReader.REPLICA_TYPE, replica.getType().name(),
+                ZkStateReader.LEADER_PROP, String.valueOf(replica.equals(slice.getLeader())),
+                CoreAdminParams.NODE, source);
+            sourceReplicas.add(props);
+          }
+        }
+      }
+    }
+    return sourceReplicas;
+  }
+
+}
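
(Aside on the pattern above: ReplaceNodeCmd fires off N asynchronous replica
creations, awaits a latch with a timeout, and rolls back everything created so
far if any one creation failed or timed out. A minimal self-contained sketch of
that pattern follows; it uses a plain CountDownLatch, and Task/runAll are
invented names for illustration, not Solr APIs.)

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class CreateThenRollbackSketch {

      interface Task {
        void start(Runnable onDone); // asynchronous; must invoke onDone exactly once
        boolean failed();            // true if this task reported a failure
        void rollback();             // best-effort undo of whatever the task created
      }

      /** Starts all tasks, waits up to timeoutSec, rolls everything back on any failure. */
      static boolean runAll(List<Task> tasks, long timeoutSec) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(tasks.size());
        AtomicBoolean anyOneFailed = new AtomicBoolean(false);
        List<Task> started = new ArrayList<>();
        for (Task t : tasks) {
          started.add(t);
          t.start(() -> {
            if (t.failed()) anyOneFailed.set(true); // record the failure, keep counting down
            latch.countDown();
          });
        }
        if (!latch.await(timeoutSec, TimeUnit.SECONDS)) anyOneFailed.set(true); // timed out
        if (anyOneFailed.get()) {
          for (Task t : started) t.rollback(); // clean up everything created so far
          return false;
        }
        return true;
      }
    }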

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
new file mode 100644
index 0000000..09ceb55
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.Set;
+
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.overseer.OverseerAction;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.backup.BackupManager;
+import org.apache.solr.core.backup.repository.BackupRepository;
+import org.apache.solr.handler.component.ShardHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.DocCollection.STATE_FORMAT;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public RestoreCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    // TODO maybe we can inherit createCollection's options/code
+
+    String restoreCollectionName = message.getStr(COLLECTION_PROP);
+    String backupName = message.getStr(NAME); // of backup
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+    String asyncId = message.getStr(ASYNC);
+    String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
+    Map<String, String> requestMap = new HashMap<>();
+
+    CoreContainer cc = ocmh.overseer.getCoreContainer();
+    BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
+
+    URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
+    URI backupPath = repository.resolve(location, backupName);
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    BackupManager backupMgr = new BackupManager(repository, zkStateReader);
+
+    Properties properties = backupMgr.readBackupProperties(location, backupName);
+    String backupCollection = properties.getProperty(BackupManager.COLLECTION_NAME_PROP);
+    DocCollection backupCollectionState = backupMgr.readCollectionState(location, backupName, backupCollection);
+
+    // Get the Solr nodes to restore a collection.
+    final List<String> nodeList = Assign.getLiveOrLiveAndCreateNodeSetList(
+        zkStateReader.getClusterState().getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM);
+
+    int numShards = backupCollectionState.getActiveSlices().size();
+    
+    int numNrtReplicas = getInt(message, NRT_REPLICAS, backupCollectionState.getNumNrtReplicas(), 0);
+    if (numNrtReplicas == 0) {
+      numNrtReplicas = getInt(message, REPLICATION_FACTOR, backupCollectionState.getReplicationFactor(), 0);
+    }
+    int numTlogReplicas = getInt(message, TLOG_REPLICAS, backupCollectionState.getNumTlogReplicas(), 0);
+    int numPullReplicas = getInt(message, PULL_REPLICAS, backupCollectionState.getNumPullReplicas(), 0);
+    int totalReplicasPerShard = numNrtReplicas + numTlogReplicas + numPullReplicas;
+    
+    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, backupCollectionState.getMaxShardsPerNode());
+    int availableNodeCount = nodeList.size();
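+    // Illustrative arithmetic for the check below: restoring 2 shards with totalReplicasPerShard=3
+    // needs 6 cores, but 4 live nodes with maxShardsPerNode=1 offer only 4 slots, so 6 > 4 fails fast.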
+    if ((numShards * totalReplicasPerShard) > (availableNodeCount * maxShardsPerNode)) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          String.format(Locale.ROOT, "Solr cloud with available number of nodes:%d is insufficient for"
+              + " restoring a collection with %d shards, total replicas per shard %d and maxShardsPerNode %d."
+              + " Consider increasing maxShardsPerNode value OR number of available nodes.",
+              availableNodeCount, numShards, totalReplicasPerShard, maxShardsPerNode));
+    }
+
+    //Upload the configs
+    String configName = (String) properties.get(OverseerCollectionMessageHandler.COLL_CONF);
+    String restoreConfigName = message.getStr(OverseerCollectionMessageHandler.COLL_CONF, configName);
+    if (zkStateReader.getConfigManager().configExists(restoreConfigName)) {
+      log.info("Using existing config {}", restoreConfigName);
+      //TODO add overwrite option?
+    } else {
+      log.info("Uploading config {}", restoreConfigName);
+      backupMgr.uploadConfigDir(location, backupName, configName, restoreConfigName);
+    }
+
+    log.info("Starting restore into collection={} with backup_name={} at location={}", restoreCollectionName, backupName,
+        location);
+
+    //Create core-less collection
+    {
+      Map<String, Object> propMap = new HashMap<>();
+      propMap.put(Overseer.QUEUE_OPERATION, CREATE.toString());
+      propMap.put("fromApi", "true"); // mostly true.  Prevents autoCreated=true in the collection state.
+      if (properties.get(STATE_FORMAT) == null) {
+        propMap.put(STATE_FORMAT, "2");
+      }
+
+      // inherit settings from input API, defaulting to the backup's setting.  Ex: replicationFactor
+      for (String collProp : OverseerCollectionMessageHandler.COLL_PROPS.keySet()) {
+        Object val = message.getProperties().getOrDefault(collProp, backupCollectionState.get(collProp));
+        if (val != null) {
+          propMap.put(collProp, val);
+        }
+      }
+
+      propMap.put(NAME, restoreCollectionName);
+      propMap.put(OverseerCollectionMessageHandler.CREATE_NODE_SET, OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY); //no cores
+      propMap.put(OverseerCollectionMessageHandler.COLL_CONF, restoreConfigName);
+
+      // router.*
+      @SuppressWarnings("unchecked")
+      Map<String, Object> routerProps = (Map<String, Object>) backupCollectionState.getProperties().get(DocCollection.DOC_ROUTER);
+      for (Map.Entry<String, Object> pair : routerProps.entrySet()) {
+        propMap.put(DocCollection.DOC_ROUTER + "." + pair.getKey(), pair.getValue());
+      }
+
+      Set<String> sliceNames = backupCollectionState.getActiveSlicesMap().keySet();
+      if (backupCollectionState.getRouter() instanceof ImplicitDocRouter) {
+        propMap.put(OverseerCollectionMessageHandler.SHARDS_PROP, StrUtils.join(sliceNames, ','));
+      } else {
+        propMap.put(OverseerCollectionMessageHandler.NUM_SLICES, sliceNames.size());
+        // ClusterStateMutator.createCollection detects that "slices" is in fact a slice structure instead of a
+        //   list of names, and if so uses this instead of building it.  We clear the replica list.
+        Collection<Slice> backupSlices = backupCollectionState.getActiveSlices();
+        Map<String, Slice> newSlices = new LinkedHashMap<>(backupSlices.size());
+        for (Slice backupSlice : backupSlices) {
+          newSlices.put(backupSlice.getName(),
+              new Slice(backupSlice.getName(), Collections.emptyMap(), backupSlice.getProperties()));
+        }
+        propMap.put(OverseerCollectionMessageHandler.SHARDS_PROP, newSlices);
+      }
+
+      ocmh.commandMap.get(CREATE).call(zkStateReader.getClusterState(), new ZkNodeProps(propMap), new NamedList());
+      // note: when createCollection() returns, the collection exists (no race)
+    }
+
+    DocCollection restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
+
+    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+
+    //Mark all shards in CONSTRUCTION STATE while we restore the data
+    {
+      //TODO: might createCollection instead accept an initial state?  Is there a race?
+      Map<String, Object> propMap = new HashMap<>();
+      propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+      for (Slice shard : restoreCollection.getSlices()) {
+        propMap.put(shard.getName(), Slice.State.CONSTRUCTION.toString());
+      }
+      propMap.put(ZkStateReader.COLLECTION_PROP, restoreCollectionName);
+      inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
+    }
+
+    // TODO how do we leverage the RULE / SNITCH logic in createCollection?
+
+    ClusterState clusterState = zkStateReader.getClusterState();
+
+    List<String> sliceNames = new ArrayList<>();
+    restoreCollection.getSlices().forEach(x -> sliceNames.add(x.getName()));
+    PolicyHelper.SessionWrapper sessionWrapper = null;
+
+    try {
+      List<ReplicaPosition> replicaPositions = Assign.identifyNodes(
+          ocmh.cloudManager, clusterState,
+          nodeList, restoreCollectionName,
+          message, sliceNames,
+          numNrtReplicas, numTlogReplicas, numPullReplicas);
+      sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
+      //Create one replica per shard and copy backed up data to it
+      for (Slice slice : restoreCollection.getSlices()) {
+        log.debug("Adding replica for shard={} collection={} ", slice.getName(), restoreCollection);
+        HashMap<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD);
+        propMap.put(COLLECTION_PROP, restoreCollectionName);
+        propMap.put(SHARD_ID_PROP, slice.getName());
+
+        if (numNrtReplicas >= 1) {
+          propMap.put(REPLICA_TYPE, Replica.Type.NRT.name());
+        } else if (numTlogReplicas >= 1) {
+          propMap.put(REPLICA_TYPE, Replica.Type.TLOG.name());
+        } else {
+          throw new SolrException(ErrorCode.BAD_REQUEST, "Unexpected number of replicas, replicationFactor, " +
+              Replica.Type.NRT + " or " + Replica.Type.TLOG + " must be greater than 0");
+        }
+
+        // Pick the first replica position assigned to this shard and restore on that node
+        for (ReplicaPosition replicaPosition : replicaPositions) {
+          if (Objects.equals(replicaPosition.shard, slice.getName())) {
+            propMap.put(CoreAdminParams.NODE, replicaPosition.node);
+            replicaPositions.remove(replicaPosition);
+            break;
+          }
+        }
+
+        // add async param
+        if (asyncId != null) {
+          propMap.put(ASYNC, asyncId);
+        }
+        ocmh.addPropertyParams(message, propMap);
+
+        ocmh.addReplica(clusterState, new ZkNodeProps(propMap), new NamedList(), null);
+      }
+
+      //refresh the location copy of collection state
+      restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
+
+      //Copy data from backed up index to each replica
+      for (Slice slice : restoreCollection.getSlices()) {
+        ModifiableSolrParams params = new ModifiableSolrParams();
+        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.RESTORECORE.toString());
+        params.set(NAME, "snapshot." + slice.getName());
+        params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString());
+        params.set(CoreAdminParams.BACKUP_REPOSITORY, repo);
+
+        ocmh.sliceCmd(clusterState, params, null, slice, shardHandler, asyncId, requestMap);
+      }
+      ocmh.processResponses(new NamedList(), shardHandler, true, "Could not restore core", asyncId, requestMap);
+
+      //Mark all shards in ACTIVE STATE
+      {
+        HashMap<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+        propMap.put(ZkStateReader.COLLECTION_PROP, restoreCollectionName);
+        for (Slice shard : restoreCollection.getSlices()) {
+          propMap.put(shard.getName(), Slice.State.ACTIVE.toString());
+        }
+        inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
+      }
+
+      //refresh the location copy of collection state
+      restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
+
+      if (totalReplicasPerShard > 1) {
+        log.info("Adding replicas to restored collection={}", restoreCollection);
+        for (Slice slice : restoreCollection.getSlices()) {
+
+          //Add the remaining replicas for each shard, considering its type
+          int createdNrtReplicas = 0, createdTlogReplicas = 0, createdPullReplicas = 0;
+
+          // We already created either an NRT or a TLOG replica as leader
+          if (numNrtReplicas > 0) {
+            createdNrtReplicas++;
+          } else if (numTlogReplicas > 0) {
+            createdTlogReplicas++;
+          }
+
+          for (int i = 1; i < totalReplicasPerShard; i++) {
+            Replica.Type typeToCreate;
+            if (createdNrtReplicas < numNrtReplicas) {
+              createdNrtReplicas++;
+              typeToCreate = Replica.Type.NRT;
+            } else if (createdTlogReplicas < numTlogReplicas) {
+              createdTlogReplicas++;
+              typeToCreate = Replica.Type.TLOG;
+            } else {
+              createdPullReplicas++;
+              typeToCreate = Replica.Type.PULL;
+              assert createdPullReplicas <= numPullReplicas: "Unexpected number of replicas";
+            }
+
+            log.debug("Adding replica for shard={} collection={} of type {} ", slice.getName(), restoreCollection, typeToCreate);
+            HashMap<String, Object> propMap = new HashMap<>();
+            propMap.put(COLLECTION_PROP, restoreCollectionName);
+            propMap.put(SHARD_ID_PROP, slice.getName());
+            propMap.put(REPLICA_TYPE, typeToCreate.name());
+
+            // Pick the first replica position assigned to this shard and restore on that node
+            for (ReplicaPosition replicaPosition : replicaPositions) {
+              if (Objects.equals(replicaPosition.shard, slice.getName())) {
+                propMap.put(CoreAdminParams.NODE, replicaPosition.node);
+                replicaPositions.remove(replicaPosition);
+                break;
+              }
+            }
+
+            // add async param
+            if (asyncId != null) {
+              propMap.put(ASYNC, asyncId);
+            }
+            ocmh.addPropertyParams(message, propMap);
+
+            ocmh.addReplica(zkStateReader.getClusterState(), new ZkNodeProps(propMap), results, null);
+          }
+        }
+      }
+
+      log.info("Completed restoring collection={} backupName={}", restoreCollection, backupName);
+    } finally {
+      if (sessionWrapper != null) sessionWrapper.release();
+    }
+  }
+
+  private int getInt(ZkNodeProps message, String propertyName, Integer count, int defaultValue) {
+    Integer value = message.getInt(propertyName, count);
+    return value != null ? value : defaultValue;
+  }
+}
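
(Aside: the replica-type bookkeeping above assumes the already-created leader
consumed one NRT slot if any NRT replicas were requested, otherwise one TLOG
slot, and then fills the remaining slots NRT first, then TLOG, then PULL. That
ordering can be isolated into a small pure function; a sketch under those
assumptions, with invented names, separate from the Overseer plumbing:)

    import java.util.ArrayList;
    import java.util.List;

    public class ReplicaTypeOrderSketch {

      enum Type { NRT, TLOG, PULL }

      /** Types of the replicas still to create, given the leader already exists. */
      static List<Type> remainingTypes(int nrt, int tlog, int pull) {
        int createdNrt = nrt > 0 ? 1 : 0;                 // leader was NRT if any requested
        int createdTlog = (nrt == 0 && tlog > 0) ? 1 : 0; // otherwise leader was TLOG
        List<Type> out = new ArrayList<>();
        for (int i = createdNrt + createdTlog; i < nrt + tlog + pull; i++) {
          if (createdNrt < nrt)        { createdNrt++;  out.add(Type.NRT);  }
          else if (createdTlog < tlog) { createdTlog++; out.add(Type.TLOG); }
          else                         { out.add(Type.PULL); }
        }
        return out;
      }

      public static void main(String[] args) {
        // nrt=2, tlog=1, pull=1 -> [NRT, TLOG, PULL] created after the NRT leader
        System.out.println(remainingTypes(2, 1, 1));
      }
    }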


[12/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to its own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
new file mode 100644
index 0000000..eff0d8e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests the Cloud Collections API.
+ */
+@Slow
+public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
+
+  private static final int MAX_TIMEOUT_SECONDS = 60;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(2)
+        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
+        .configure();
+  }
+
+  @Test
+  public void testSolrJAPICalls() throws Exception {
+
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    RequestStatusState state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("Recreating a collection with the same should have failed.", RequestStatusState.FAILED, state);
+
+    state = CollectionAdminRequest.addReplicaToShard("testasynccollectioncreation", "shard1")
+      .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("Add replica did not complete", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.splitShard("testasynccollectioncreation")
+        .setShardName("shard1")
+        .processAndWait(client, MAX_TIMEOUT_SECONDS * 2);
+    assertEquals("Shard split did not complete. Last recorded state: " + state, RequestStatusState.COMPLETED, state);
+
+  }
+
+  @Test
+  public void testAsyncRequests() throws Exception {
+
+    final String collection = "testAsyncOperations";
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    RequestStatusState state = CollectionAdminRequest.createCollection(collection,"conf1",1,1)
+        .setRouterName("implicit")
+        .setShards("shard1")
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
+
+    //Add a few documents to shard1
+    int numDocs = TestUtil.nextInt(random(), 10, 100);
+    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
+    for (int i=0; i<numDocs; i++) {
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", i);
+      doc.addField("_route_", "shard1");
+      docs.add(doc);
+    }
+    client.add(collection, docs);
+    client.commit(collection);
+
+    SolrQuery query = new SolrQuery("*:*");
+    query.set("shards", "shard1");
+    assertEquals(numDocs, client.query(collection, query).getResults().getNumFound());
+
+    state = CollectionAdminRequest.reloadCollection(collection)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("ReloadCollection did not complete", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.createShard(collection,"shard2")
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("CreateShard did not complete", RequestStatusState.COMPLETED, state);
+
+    //Add a doc to shard2 to make sure shard2 was created properly
+    SolrInputDocument doc = new SolrInputDocument();
+    doc.addField("id", numDocs + 1);
+    doc.addField("_route_", "shard2");
+    client.add(collection, doc);
+    client.commit(collection);
+    query = new SolrQuery("*:*");
+    query.set("shards", "shard2");
+    assertEquals(1, client.query(collection, query).getResults().getNumFound());
+
+    state = CollectionAdminRequest.deleteShard(collection,"shard2").processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("DeleteShard did not complete", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
+      .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("AddReplica did not complete", RequestStatusState.COMPLETED, state);
+
+    //cloudClient watch might take a couple of seconds to reflect the new replica
+    Slice shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
+    int count = 0;
+    while (shard1.getReplicas().size() != 2) {
+      if (count++ > 1000) {
+        fail("2nd Replica not reflecting in the cluster state");
+      }
+      Thread.sleep(100);
+    }
+
+    state = CollectionAdminRequest.createAlias("myalias",collection)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("CreateAlias did not complete", RequestStatusState.COMPLETED, state);
+
+    query = new SolrQuery("*:*");
+    query.set("shards", "shard1");
+    assertEquals(numDocs, client.query("myalias", query).getResults().getNumFound());
+
+    state = CollectionAdminRequest.deleteAlias("myalias")
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("DeleteAlias did not complete", RequestStatusState.COMPLETED, state);
+
+    try {
+      client.query("myalias", query);
+      fail("Alias should not exist");
+    } catch (SolrException e) {
+      //expected
+    }
+
+    Replica replica = shard1.getReplicas().iterator().next();
+    for (String liveNode : client.getZkStateReader().getClusterState().getLiveNodes()) {
+      if (!replica.getNodeName().equals(liveNode)) {
+        state = new CollectionAdminRequest.MoveReplica(collection, replica.getName(), liveNode)
+            .processAndWait(client, MAX_TIMEOUT_SECONDS);
+        assertSame("MoveReplica did not complete", RequestStatusState.COMPLETED, state);
+        break;
+      }
+    }
+
+    shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
+    String replicaName = shard1.getReplicas().iterator().next().getName();
+    state = CollectionAdminRequest.deleteReplica(collection, "shard1", replicaName)
+      .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("DeleteReplica did not complete", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.deleteCollection(collection)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("DeleteCollection did not complete", RequestStatusState.COMPLETED, state);
+  }
+
+}
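
(Aside: processAndWait, used throughout this test, submits the request with an
async id and then polls REQUESTSTATUS until a terminal state or the timeout. A
minimal sketch of the same call pattern outside a test harness; the zkHost
localhost:9983 and the conf1 configset are assumptions for illustration, not
part of this commit:)

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.client.solrj.response.RequestStatusState;

    public class AsyncCreateSketch {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:9983") // assumed ZooKeeper address
            .build()) {
          // submit asynchronously, then poll request status for up to 60 seconds
          RequestStatusState state = CollectionAdminRequest
              .createCollection("sketch_collection", "conf1", 1, 1)
              .processAndWait(client, 60);
          if (state != RequestStatusState.COMPLETED) {
            throw new IllegalStateException("Create did not complete, last state: " + state);
          }
        }
      }
    }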

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistributedZkTest.java
new file mode 100644
index 0000000..213e554
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistributedZkTest.java
@@ -0,0 +1,686 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import javax.management.MBeanServer;
+import javax.management.MBeanServerFactory;
+import javax.management.ObjectName;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.lang.management.ManagementFactory;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.commons.io.IOUtils;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.CoreStatus;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.SolrInfoBean.Category;
+import org.apache.solr.util.LogLevel;
+import org.apache.solr.util.TestInjection;
+import org.apache.solr.util.TimeOut;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+
+/**
+ * Tests the Cloud Collections API.
+ */
+@Slow
+public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @BeforeClass
+  public static void beforeCollectionsAPIDistributedZkTest() {
+    // we don't want this test to have zk timeouts
+    System.setProperty("zkClientTimeout", "240000");
+    TestInjection.randomDelayInCoreCreation = "true:20";
+    System.setProperty("validateAfterInactivity", "200");
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    String solrXml = IOUtils.toString(CollectionsAPIDistributedZkTest.class.getResourceAsStream("/solr/solr-jmxreporter.xml"), "UTF-8");
+    configureCluster(4)
+        .addConfig("conf", configset("cloud-minimal"))
+        .addConfig("conf2", configset("cloud-minimal-jmx"))
+        .withSolrXml(solrXml)
+        .configure();
+  }
+
+  @Before
+  public void clearCluster() throws Exception {
+    try {
+      cluster.deleteAllCollections();
+    } finally {
+      System.clearProperty("zkClientTimeout");
+    }
+  }
+
+  @Test
+  public void testCreationAndDeletion() throws Exception {
+
+    String collectionName = "created_and_deleted";
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1).process(cluster.getSolrClient());
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+                  .contains(collectionName));
+
+    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+        .contains(collectionName));
+
+    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+
+
+  }
+
+  @Test
+  public void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
+    
+    String collectionName = "out_of_sync_collection";
+
+    // manually create a collections zknode
+    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+    CollectionAdminRequest.deleteCollection(collectionName)
+        .process(cluster.getSolrClient());
+
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+                  .contains(collectionName));
+    
+    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+
+  }
+
+  @Test
+  public void deletePartiallyCreatedCollection() throws Exception {
+
+    final String collectionName = "halfdeletedcollection";
+
+    assertEquals(0, CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    String dataDir = createTempDir().toFile().getAbsolutePath();
+    // create a core that simulates something left over from a partially-deleted collection
+    assertTrue(CollectionAdminRequest
+        .addReplicaToShard(collectionName, "shard1")
+        .setDataDir(dataDir)
+        .process(cluster.getSolrClient()).isSuccess());
+
+    CollectionAdminRequest.deleteCollection(collectionName)
+        .process(cluster.getSolrClient());
+
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .process(cluster.getSolrClient());
+
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+  }
+
+  @Test
+  public void deleteCollectionOnlyInZk() throws Exception {
+
+    final String collectionName = "onlyinzk";
+
+    // create the collections node, but nothing else
+    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+    // delete via API - should remove collections node
+    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+    
+    // now creating that collection should work
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .process(cluster.getSolrClient());
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+  }
+
+  @Test
+  public void testBadActionNames() throws Exception {
+
+    // try a bad action
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", "BADACTION");
+    String collectionName = "badactioncollection";
+    params.set("name", collectionName);
+    params.set("numShards", 2);
+    final QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+
+  }
+
+  @Test
+  public void testMissingRequiredParameters() {
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("numShards", 2);
+    // missing required collection parameter
+    final SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+  }
+
+  @Test
+  public void testTooManyReplicas() {
+
+    CollectionAdminRequest req = CollectionAdminRequest.createCollection("collection", "conf", 2, 10);
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(req);
+    });
+
+  }
+
+  @Test
+  public void testMissingNumShards() {
+
+    // Creating a collection with no numShards should fail
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("name", "acollection");
+    params.set(REPLICATION_FACTOR, 10);
+    params.set("collection.configName", "conf");
+
+    final SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+
+  }
+
+  @Test
+  public void testZeroNumShards() {
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("name", "acollection");
+    params.set(REPLICATION_FACTOR, 10);
+    params.set("numShards", 0);
+    params.set("collection.configName", "conf");
+
+    final SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+
+  }
+
+  @Test
+  public void testCreateShouldFailOnExistingCore() throws Exception {
+    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker", "conf", 1, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker", "shard1")
+        .setNode(cluster.getJettySolrRunner(0).getNodeName())
+        .setCoreName("halfcollection_shard1_replica_n1")
+        .process(cluster.getSolrClient()).isSuccess());
+
+    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker2", "conf",1, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker2", "shard1")
+        .setNode(cluster.getJettySolrRunner(1).getNodeName())
+        .setCoreName("halfcollection_shard1_replica_n1")
+        .process(cluster.getSolrClient()).isSuccess());
+
+    String nn1 = cluster.getJettySolrRunner(0).getNodeName();
+    String nn2 = cluster.getJettySolrRunner(1).getNodeName();
+
+    CollectionAdminResponse resp = CollectionAdminRequest.createCollection("halfcollection", "conf", 2, 1)
+        .setCreateNodeSet(nn1 + "," + nn2)
+        .process(cluster.getSolrClient());
+    
+    SimpleOrderedMap success = (SimpleOrderedMap) resp.getResponse().get("success");
+    SimpleOrderedMap failure = (SimpleOrderedMap) resp.getResponse().get("failure");
+
+    assertNotNull(resp.toString(), success);
+    assertNotNull(resp.toString(), failure);
+    
+    String val1 = success.getVal(0).toString();
+    String val2 = failure.getVal(0).toString();
+    assertTrue(val1.contains("SolrException") || val2.contains("SolrException"));
+  }
+
+  @Test
+  public void testNoConfigSetExist() throws Exception {
+
+    expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.createCollection("noconfig", "conf123", 1, 1)
+          .process(cluster.getSolrClient());
+    });
+
+    TimeUnit.MILLISECONDS.sleep(1000);
+    // the collection should not have been created
+    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("noconfig");
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains("noconfig"));
+  }
+
+  @Test
+  public void testCoresAreDistributedAcrossNodes() throws Exception {
+
+    CollectionAdminRequest.createCollection("nodes_used_collection", "conf", 2, 2)
+        .process(cluster.getSolrClient());
+
+    Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
+
+    List<String> createNodeList = new ArrayList<>();
+    createNodeList.addAll(liveNodes);
+
+    DocCollection collection = getCollectionState("nodes_used_collection");
+    for (Slice slice : collection.getSlices()) {
+      for (Replica replica : slice.getReplicas()) {
+        createNodeList.remove(replica.getNodeName());
+      }
+    }
+
+    assertEquals(createNodeList.toString(), 0, createNodeList.size());
+
+  }
+
+  @Test
+  public void testDeleteNonExistentCollection() throws Exception {
+
+    SolrException e = expectThrows(SolrException.class, () -> {
+      CollectionAdminRequest.deleteCollection("unknown_collection").process(cluster.getSolrClient());
+    });
+
+    // create another collection should still work
+    CollectionAdminRequest.createCollection("acollectionafterbaddelete", "conf", 1, 2)
+        .process(cluster.getSolrClient());
+    waitForState("Collection creation after a bad delete failed", "acollectionafterbaddelete",
+        (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
+  }
+
+  @Test
+  public void testSpecificConfigsets() throws Exception {
+    CollectionAdminRequest.createCollection("withconfigset2", "conf2", 1, 1).process(cluster.getSolrClient());
+    byte[] data = zkClient().getData(ZkStateReader.COLLECTIONS_ZKNODE + "/" + "withconfigset2", null, null, true);
+    assertNotNull(data);
+    ZkNodeProps props = ZkNodeProps.load(data);
+    String configName = props.getStr(ZkController.CONFIGNAME_PROP);
+    assertEquals("conf2", configName);
+  }
+
+  @Test
+  public void testMaxNodesPerShard() throws Exception {
+
+    // test maxShardsPerNode
+    int numLiveNodes = cluster.getJettySolrRunners().size();
+    int numShards = (numLiveNodes/2) + 1;
+    int replicationFactor = 2;
+    int maxShardsPerNode = 1;
+
+    SolrException e = expectThrows(SolrException.class, () -> {
+      CollectionAdminRequest.createCollection("oversharded", "conf", numShards, replicationFactor)
+          .process(cluster.getSolrClient());
+    });
+
+  }
+
+  @Test
+  public void testCreateNodeSet() throws Exception {
+
+    JettySolrRunner jetty1 = cluster.getRandomJetty(random());
+    JettySolrRunner jetty2 = cluster.getRandomJetty(random());
+
+    List<String> baseUrls = ImmutableList.of(jetty1.getBaseUrl().toString(), jetty2.getBaseUrl().toString());
+
+    CollectionAdminRequest.createCollection("nodeset_collection", "conf", 2, 1)
+        .setCreateNodeSet(baseUrls.get(0) + "," + baseUrls.get(1))
+        .process(cluster.getSolrClient());
+
+    DocCollection collectionState = getCollectionState("nodeset_collection");
+    for (Replica replica : collectionState.getReplicas()) {
+      String replicaUrl = replica.getCoreUrl();
+      boolean matchingJetty = false;
+      for (String jettyUrl : baseUrls) {
+        if (replicaUrl.startsWith(jettyUrl))
+          matchingJetty = true;
+      }
+      if (!matchingJetty)
+        fail("Expected replica to be on " + baseUrls + " but was on " + replicaUrl);
+    }
+
+  }
+
+  @Test
+  public void testCollectionsAPI() throws Exception {
+
+    // create new collections rapid fire
+    int cnt = random().nextInt(TEST_NIGHTLY ? 3 : 1) + 1;
+    CollectionAdminRequest.Create[] createRequests = new CollectionAdminRequest.Create[cnt];
+
+    for (int i = 0; i < cnt; i++) {
+
+      int numShards = TestUtil.nextInt(random(), 0, cluster.getJettySolrRunners().size()) + 1;
+      int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
+      int maxShardsPerNode = (((numShards * replicationFactor) / cluster.getJettySolrRunners().size())) + 1;
+
+      createRequests[i]
+          = CollectionAdminRequest.createCollection("awhollynewcollection_" + i, "conf2", numShards, replicationFactor)
+          .setMaxShardsPerNode(maxShardsPerNode);
+      createRequests[i].processAsync(cluster.getSolrClient());
+    }
+
+    for (int i = 0; i < cnt; i++) {
+      String collectionName = "awhollynewcollection_" + i;
+      final int j = i;
+      waitForState("Expected to see collection " + collectionName, collectionName,
+          (n, c) -> {
+            CollectionAdminRequest.Create req = createRequests[j];
+            return DocCollection.isFullyActive(n, c, req.getNumShards(), req.getReplicationFactor());
+          });
+    }
+
+    cluster.injectChaos(random());
+
+    for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
+      checkInstanceDirs(cluster.getJettySolrRunner(i));
+    }
+
+    String collectionName = createRequests[random().nextInt(createRequests.length)].getCollectionName();
+
+    new UpdateRequest()
+        .add("id", "6")
+        .add("id", "7")
+        .add("id", "8")
+        .commit(cluster.getSolrClient(), collectionName);
+    TimeOut timeOut = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+    while (!timeOut.hasTimedOut()) {
+      try {
+        long numFound = cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound();
+        assertEquals(3, numFound);
+        break;
+      } catch (Exception e) {
+        // Query node can have stale clusterstate
+        log.info("Error when query " + collectionName, e);
+        Thread.sleep(500);
+      }
+    }
+    if (timeOut.hasTimedOut()) {
+      fail("Timeout on query " + collectionName);
+    }
+
+    checkNoTwoShardsUseTheSameIndexDir();
+  }
+
+  @Test
+  public void testCollectionReload() throws Exception {
+
+    final String collectionName = "reloaded_collection";
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2).process(cluster.getSolrClient());
+
+    // get core open times
+    Map<String, Long> urlToTimeBefore = new HashMap<>();
+    collectStartTimes(collectionName, urlToTimeBefore);
+    assertTrue(urlToTimeBefore.size() > 0);
+
+    CollectionAdminRequest.reloadCollection(collectionName).processAsync(cluster.getSolrClient());
+
+    // reloads may take a short while
+    boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
+    assertTrue("some core start times did not change on reload", allTimesAreCorrect);
+  }
+
+  private void checkInstanceDirs(JettySolrRunner jetty) throws IOException {
+    CoreContainer cores = jetty.getCoreContainer();
+    Collection<SolrCore> theCores = cores.getCores();
+    for (SolrCore core : theCores) {
+
+      // look for core props file
+      Path instancedir = (Path) core.getResourceLoader().getInstancePath();
+      assertTrue("Could not find expected core.properties file", Files.exists(instancedir.resolve("core.properties")));
+
+      Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve(core.getName());
+
+      assertTrue("Expected: " + expected + "\nFrom core stats: " + instancedir, Files.isSameFile(expected, instancedir));
+
+    }
+  }
+
+  private boolean waitForReloads(String collectionName, Map<String,Long> urlToTimeBefore) throws SolrServerException, IOException {
+
+
+    TimeOut timeout = new TimeOut(45, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+
+    boolean allTimesAreCorrect = false;
+    while (! timeout.hasTimedOut()) {
+      Map<String,Long> urlToTimeAfter = new HashMap<>();
+      collectStartTimes(collectionName, urlToTimeAfter);
+      
+      boolean retry = false;
+      Set<Entry<String,Long>> entries = urlToTimeBefore.entrySet();
+      for (Entry<String,Long> entry : entries) {
+        Long beforeTime = entry.getValue();
+        Long afterTime = urlToTimeAfter.get(entry.getKey());
+        assertNotNull(afterTime);
+        if (afterTime <= beforeTime) {
+          retry = true;
+          break;
+        }
+
+      }
+      if (!retry) {
+        allTimesAreCorrect = true;
+        break;
+      }
+    }
+    return allTimesAreCorrect;
+  }
+
+  private void collectStartTimes(String collectionName, Map<String,Long> urlToTime)
+      throws SolrServerException, IOException {
+
+    DocCollection collectionState = getCollectionState(collectionName);
+    if (collectionState != null) {
+      for (Slice shard : collectionState) {
+        for (Replica replica : shard) {
+          ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica);
+          CoreStatus coreStatus;
+          try (HttpSolrClient server = getHttpSolrClient(coreProps.getBaseUrl())) {
+            coreStatus = CoreAdminRequest.getCoreStatus(coreProps.getCoreName(), false, server);
+          }
+          long before = coreStatus.getCoreStartTime().getTime();
+          urlToTime.put(coreProps.getCoreUrl(), before);
+        }
+      }
+    } else {
+      throw new IllegalArgumentException("Could not find collection " + collectionName);
+    }
+  }
+  
+  private void checkNoTwoShardsUseTheSameIndexDir() throws Exception {
+    Map<String, Set<String>> indexDirToShardNamesMap = new HashMap<>();
+    
+    List<MBeanServer> servers = new LinkedList<>();
+    servers.add(ManagementFactory.getPlatformMBeanServer());
+    servers.addAll(MBeanServerFactory.findMBeanServer(null));
+    for (final MBeanServer server : servers) {
+      Set<ObjectName> mbeans = new HashSet<>();
+      mbeans.addAll(server.queryNames(null, null));
+      for (final ObjectName mbean : mbeans) {
+
+        try {
+          Map<String, String> props = mbean.getKeyPropertyList();
+          String category = props.get("category");
+          String name = props.get("name");
+          if ((category != null && category.equals(Category.CORE.toString())) &&
+              (name != null && name.equals("indexDir"))) {
+            String indexDir = server.getAttribute(mbean, "Value").toString();
+            String key = props.get("dom2") + "." + props.get("dom3") + "." + props.get("dom4");
+            indexDirToShardNamesMap.computeIfAbsent(indexDir, k -> new HashSet<>()).add(key);
+          }
+        } catch (Exception e) {
+          // ignore, just continue - probably a "Value" attribute
+          // not found
+        }
+      }
+    }
+    
+    assertTrue(
+        "Something is broken in the assert for no shards using the same indexDir - probably something was changed in the attributes published in the MBean of "
+            + SolrCore.class.getSimpleName() + " : " + indexDirToShardNamesMap,
+        indexDirToShardNamesMap.size() > 0);
+    for (Entry<String,Set<String>> entry : indexDirToShardNamesMap.entrySet()) {
+      if (entry.getValue().size() > 1) {
+        fail("We have shards using the same indexDir. E.g. shards "
+            + entry.getValue().toString() + " all use indexDir "
+            + entry.getKey());
+      }
+    }
+
+  }
+
+  @Test
+  @LogLevel("org.apache.solr.cloud=DEBUG")
+  public void addReplicaTest() throws Exception {
+    String collectionName = "addReplicaColl";
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
+        .setMaxShardsPerNode(4)
+        .process(cluster.getSolrClient());
+
+    ArrayList<String> nodeList
+        = new ArrayList<>(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
+    Collections.shuffle(nodeList, random());
+
+    CollectionAdminResponse response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .setNode(nodeList.get(0))
+        .process(cluster.getSolrClient());
+    Replica newReplica = grabNewReplica(response, getCollectionState(collectionName));
+
+    assertEquals("Replica should be created on the right node",
+        cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
+        newReplica.getStr(ZkStateReader.BASE_URL_PROP));
+
+    Path instancePath = createTempDir();
+    response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .withProperty(CoreAdminParams.INSTANCE_DIR, instancePath.toString())
+        .process(cluster.getSolrClient());
+    newReplica = grabNewReplica(response, getCollectionState(collectionName));
+    assertNotNull(newReplica);
+
+    try (HttpSolrClient coreclient = getHttpSolrClient(newReplica.getStr(ZkStateReader.BASE_URL_PROP))) {
+      CoreAdminResponse status = CoreAdminRequest.getStatus(newReplica.getStr("core"), coreclient);
+      NamedList<Object> coreStatus = status.getCoreStatus(newReplica.getStr("core"));
+      String instanceDirStr = (String) coreStatus.get("instanceDir");
+      assertEquals(instanceDirStr, instancePath.toString());
+    }
+
+    //Test to make sure we can't create another replica with an existing core_name of that collection
+    String coreName = newReplica.getStr(CORE_NAME_PROP);
+    SolrException e = expectThrows(SolrException.class, () -> {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", "addreplica");
+      params.set("collection", collectionName);
+      params.set("shard", "shard1");
+      params.set("name", coreName);
+      QueryRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+      cluster.getSolrClient().request(request);
+    });
+
+    assertTrue(e.getMessage().contains("Another replica with the same core name already exists for this collection"));
+
+    // Check that specifying property.name works. DO NOT remove this when the "name" property is deprecated
+    // for ADDREPLICA; this is "property.name". See SOLR-7132
+    response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .withProperty(CoreAdminParams.NAME, "propertyDotName")
+        .process(cluster.getSolrClient());
+
+    newReplica = grabNewReplica(response, getCollectionState(collectionName));
+    assertEquals("'core' should be 'propertyDotName' ", "propertyDotName", newReplica.getStr("core"));
+
+  }
+
+  private Replica grabNewReplica(CollectionAdminResponse response, DocCollection docCollection) {
+    String replicaName = response.getCollectionCoresStatus().keySet().iterator().next();
+    Optional<Replica> optional = docCollection.getReplicas().stream()
+        .filter(replica -> replicaName.equals(replica.getCoreName()))
+        .findAny();
+    if (optional.isPresent()) {
+      return optional.get();
+    }
+    throw new AssertionError("Can not find " + replicaName + " from " + docCollection);
+  }
+
+}
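
(Aside: waitForReloads/collectStartTimes above implement a generic
poll-until-every-value-advances loop. A standalone sketch of just that loop,
with a fake snapshot supplier standing in for the CoreAdmin status calls:)

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;
    import java.util.function.Supplier;

    public class ReloadPollSketch {

      /** Polls snapshots until every tracked value advances past its baseline, or times out. */
      static boolean waitForAllToAdvance(Map<String, Long> before,
                                         Supplier<Map<String, Long>> snapshot,
                                         long timeoutSec) throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeoutSec);
        while (System.nanoTime() < deadline) {
          Map<String, Long> after = snapshot.get();
          boolean allAdvanced = before.entrySet().stream()
              .allMatch(e -> after.getOrDefault(e.getKey(), Long.MIN_VALUE) > e.getValue());
          if (allAdvanced) return true;
          Thread.sleep(250); // back off between polls
        }
        return false;
      }

      public static void main(String[] args) throws Exception {
        Map<String, Long> before = new HashMap<>();
        before.put("core1", 100L);
        Supplier<Map<String, Long>> fakeSnapshot = () -> {
          Map<String, Long> m = new HashMap<>();
          m.put("core1", 200L); // pretends the core restarted later than the baseline
          return m;
        };
        System.out.println(waitForAllToAdvance(before, fakeSnapshot, 5)); // prints true
      }
    }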

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentDeleteAndCreateCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentDeleteAndCreateCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentDeleteAndCreateCollectionTest.java
new file mode 100644
index 0000000..1d0036e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentDeleteAndCreateCollectionTest.java
@@ -0,0 +1,227 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.nio.file.Path;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.lucene.util.LuceneTestCase.Nightly;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.cloud.MiniSolrCloudCluster;
+import org.apache.solr.common.util.IOUtils;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Nightly
+public class ConcurrentDeleteAndCreateCollectionTest extends SolrTestCaseJ4 {
+  
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  
+  private MiniSolrCloudCluster solrCluster;
+  
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    solrCluster = new MiniSolrCloudCluster(1, createTempDir(), buildJettyConfig("/solr"));
+  }
+  
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    solrCluster.shutdown();
+    super.tearDown();
+  }
+  
+  public void testConcurrentCreateAndDeleteDoesNotFail() {
+    final AtomicReference<Exception> failure = new AtomicReference<>();
+    final int timeToRunSec = 30;
+    final CreateDeleteCollectionThread[] threads = new CreateDeleteCollectionThread[10];
+    for (int i = 0; i < threads.length; i++) {
+      final String collectionName = "collection" + i;
+      uploadConfig(configset("configset-2"), collectionName);
+      final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
+      final SolrClient solrClient = getHttpSolrClient(baseUrl);
+      threads[i] = new CreateDeleteSearchCollectionThread("create-delete-search-" + i, collectionName, collectionName, 
+          timeToRunSec, solrClient, failure);
+    }
+    
+    startAll(threads);
+    joinAll(threads);
+    
+    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
+  }
+  
+  public void testConcurrentCreateAndDeleteOverTheSameConfig() {
+    final String configName = "testconfig";
+    uploadConfig(configset("configset-2"), configName); // upload config once, to be used by all collections
+    final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
+    final AtomicReference<Exception> failure = new AtomicReference<>();
+    final int timeToRunSec = 30;
+    final CreateDeleteCollectionThread[] threads = new CreateDeleteCollectionThread[2];
+    for (int i = 0; i < threads.length; i++) {
+      final String collectionName = "collection" + i;
+      final SolrClient solrClient = getHttpSolrClient(baseUrl);
+      threads[i] = new CreateDeleteCollectionThread("create-delete-" + i, collectionName, configName,
+                                                    timeToRunSec, solrClient, failure);
+    }
+
+    startAll(threads);
+    joinAll(threads);
+
+    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
+  }
+  
+  private void uploadConfig(Path configDir, String configName) {
+    try {
+      solrCluster.uploadConfigSet(configDir, configName);
+    } catch (IOException | KeeperException | InterruptedException e) {
+      throw new RuntimeException(e);
+    }
+  }
+  
+  private void joinAll(final CreateDeleteCollectionThread[] threads) {
+    for (CreateDeleteCollectionThread t : threads) {
+      try {
+        t.joinAndClose();
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt(); // restore the interrupt status before propagating
+        throw new RuntimeException(e);
+      }
+    }
+  }
+  
+  private void startAll(final Thread[] threads) {
+    for (Thread t : threads) {
+      t.start();
+    }
+  }
+  
+  private static class CreateDeleteCollectionThread extends Thread {
+    protected final String collectionName;
+    protected final String configName;
+    protected final long timeToRunSec;
+    protected final SolrClient solrClient;
+    protected final AtomicReference<Exception> failure;
+    
+    public CreateDeleteCollectionThread(String name, String collectionName, String configName, long timeToRunSec,
+        SolrClient solrClient, AtomicReference<Exception> failure) {
+      super(name);
+      this.collectionName = collectionName;
+      this.timeToRunSec = timeToRunSec;
+      this.solrClient = solrClient;
+      this.failure = failure;
+      this.configName = configName;
+    }
+    
+    @Override
+    public void run() {
+      final TimeOut timeout = new TimeOut(timeToRunSec, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+      while (! timeout.hasTimedOut() && failure.get() == null) {
+        doWork();
+      }
+    }
+    
+    protected void doWork() {
+      createCollection();
+      deleteCollection();
+    }
+    
+    protected void addFailure(Exception e) {
+      log.error("Add Failure", e);
+      synchronized (failure) {
+        if (failure.get() != null) {
+          failure.get().addSuppressed(e);
+        } else {
+          failure.set(e);
+        }
+      }
+    }
+    
+    private void createCollection() {
+      try {
+        final CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName,configName,1,1)
+                .process(solrClient);
+        if (response.getStatus() != 0) {
+          addFailure(new RuntimeException("failed to create collection " + collectionName));
+        }
+      } catch (Exception e) {
+        addFailure(e);
+      }
+      
+    }
+    
+    private void deleteCollection() {
+      try {
+        final CollectionAdminRequest.Delete deleteCollectionRequest
+          = CollectionAdminRequest.deleteCollection(collectionName);
+        final CollectionAdminResponse response = deleteCollectionRequest.process(solrClient);
+        if (response.getStatus() != 0) {
+          addFailure(new RuntimeException("failed to delete collection " + collectionName));
+        }
+      } catch (Exception e) {
+        addFailure(e);
+      }
+    }
+    
+    public void joinAndClose() throws InterruptedException {
+      try {
+        super.join(60000);
+      } finally {
+        IOUtils.closeQuietly(solrClient);
+      }
+    }
+  }
+  
+  private static class CreateDeleteSearchCollectionThread extends CreateDeleteCollectionThread {
+
+    public CreateDeleteSearchCollectionThread(String name, String collectionName, String configName, long timeToRunSec,
+        SolrClient solrClient, AtomicReference<Exception> failure) {
+      super(name, collectionName, configName, timeToRunSec, solrClient, failure);
+    }
+    
+    @Override
+    protected void doWork() {
+      super.doWork();
+      searchNonExistingCollection();
+    }
+    
+    private void searchNonExistingCollection() {
+      try {
+        solrClient.query(collectionName, new SolrQuery("*"));
+      } catch (Exception e) {
+        if (!e.getMessage().contains("not found") && !e.getMessage().contains("Can not find")) {
+          addFailure(e);
+        }
+      }
+    }
+    
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CustomCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CustomCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CustomCollectionTest.java
new file mode 100644
index 0000000..654c7e9
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CustomCollectionTest.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.Map;
+
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
+import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.params.ShardParams._ROUTE_;
+
+/**
+ * Tests the Custom Sharding API.
+ */
+public class CustomCollectionTest extends SolrCloudTestCase {
+
+  private static final int NODE_COUNT = 4;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(NODE_COUNT)
+        .addConfig("conf", configset("cloud-dynamic"))
+        .configure();
+  }
+
+  @Before
+  public void ensureClusterEmpty() throws Exception {
+    cluster.deleteAllCollections();
+  }
+
+  @Test
+  public void testCustomCollectionsAPI() throws Exception {
+
+    final String collection = "implicitcoll";
+    int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
+    int numShards = 3;
+    int maxShardsPerNode = (((numShards + 1) * replicationFactor) / NODE_COUNT) + 1;
+
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c", replicationFactor)
+        .setMaxShardsPerNode(maxShardsPerNode)
+        .process(cluster.getSolrClient());
+
+    DocCollection coll = getCollectionState(collection);
+    assertEquals("implicit", ((Map) coll.get(DOC_ROUTER)).get("name"));
+    assertNotNull(coll.getStr(REPLICATION_FACTOR));
+    assertNotNull(coll.getStr(MAX_SHARDS_PER_NODE));
+    assertNull("A shard of a Collection configured with implicit router must have null range",
+        coll.getSlice("a").getRange());
+
+    new UpdateRequest()
+        .add("id", "6")
+        .add("id", "7")
+        .add("id", "8")
+        .withRoute("a")
+        .commit(cluster.getSolrClient(), collection);
+
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+
+    cluster.getSolrClient().deleteByQuery(collection, "*:*");
+    cluster.getSolrClient().commit(collection, true, true);
+    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+
+    new UpdateRequest()
+        .add("id", "9")
+        .add("id", "10")
+        .add("id", "11")
+        .withRoute("c")
+        .commit(cluster.getSolrClient(), collection);
+
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
+
+    //Testing CREATESHARD
+    CollectionAdminRequest.createShard(collection, "x")
+        .process(cluster.getSolrClient());
+    waitForState("Expected shard 'x' to be active", collection, (n, c) -> {
+      if (c.getSlice("x") == null)
+        return false;
+      for (Replica r : c.getSlice("x")) {
+        if (r.getState() != Replica.State.ACTIVE)
+          return false;
+      }
+      return true;
+    });
+
+    new UpdateRequest()
+        .add("id", "66", _ROUTE_, "x")
+        .commit(cluster.getSolrClient(), collection);
+    // TODO - the local state is cached and causes the request to fail with 'unknown shard'
+    // assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "x")).getResults().getNumFound());
+
+  }
+
+  @Test
+  public void testRouteFieldForImplicitRouter() throws Exception {
+
+    int numShards = 4;
+    int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
+    int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
+    String shard_fld = "shard_s";
+
+    final String collection = "withShardField";
+
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c,d", replicationFactor)
+        .setMaxShardsPerNode(maxShardsPerNode)
+        .setRouterField(shard_fld)
+        .process(cluster.getSolrClient());
+
+    new UpdateRequest()
+        .add("id", "6", shard_fld, "a")
+        .add("id", "7", shard_fld, "a")
+        .add("id", "8", shard_fld, "b")
+        .commit(cluster.getSolrClient(), collection);
+
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
+    assertEquals(2, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+
+  }
+
+  @Test
+  public void testRouteFieldForHashRouter() throws Exception {
+    String collectionName = "routeFieldColl";
+    int numShards = 4;
+    int replicationFactor = 2;
+    int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
+    String shard_fld = "shard_s";
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", numShards, replicationFactor)
+        .setMaxShardsPerNode(maxShardsPerNode)
+        .setRouterField(shard_fld)
+        .process(cluster.getSolrClient());
+
+    new UpdateRequest()
+        .add("id", "6", shard_fld, "a")
+        .add("id", "7", shard_fld, "a")
+        .add("id", "8", shard_fld, "b")
+        .commit(cluster.getSolrClient(), collectionName);
+
+    assertEquals(3, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(2, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+    assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
+    assertEquals(0, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
+
+
+    cluster.getSolrClient().deleteByQuery(collectionName, "*:*");
+    cluster.getSolrClient().commit(collectionName);
+
+    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "100", shard_fld, "c!doc1"));
+    cluster.getSolrClient().commit(collectionName);
+    assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c!")).getResults().getNumFound());
+
+  }
+
+  @Test
+  public void testCreateShardRepFactor() throws Exception  {
+    final String collectionName = "testCreateShardRepFactor";
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "a,b", 1)
+        .process(cluster.getSolrClient());
+
+    CollectionAdminRequest.createShard(collectionName, "x")
+        .process(cluster.getSolrClient());
+
+    waitForState("Not enough active replicas in shard 'x'", collectionName, (n, c) -> {
+      return c.getSlice("x").getReplicas().size() == 1;
+    });
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
new file mode 100644
index 0000000..ae83ebf
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Metric;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.CoreStatus;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.cloud.hdfs.HdfsTestUtil;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.solr.metrics.SolrMetricManager;
+import org.apache.solr.util.BadHdfsThreadsFilter;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@Slow
+@Nightly
+@ThreadLeakFilters(defaultFilters = true, filters = {
+    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
+})
+public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistributedZkTest {
+
+  private static MiniDFSCluster dfsCluster;
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
+    System.setProperty("tests.hdfs.numdatanodes", "1");
+   
+    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
+
+    ZkConfigManager configManager = new ZkConfigManager(zkClient());
+    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf");
+    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf2");
+
+    System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
+  }
+
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    cluster.shutdown(); // need to close before the MiniDFSCluster
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+    System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+    System.clearProperty("tests.hdfs.numdatanodes");
+    System.clearProperty("solr.hdfs.home");
+  }
+
+  @Test
+  public void moveReplicaTest() throws Exception {
+    cluster.waitForAllNodes(5000);
+    String coll = "movereplicatest_coll";
+
+    CloudSolrClient cloudClient = cluster.getSolrClient();
+
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
+    create.setMaxShardsPerNode(2);
+    cloudClient.request(create);
+
+    for (int i = 0; i < 10; i++) {
+      cloudClient.add(coll, sdoc("id",String.valueOf(i)));
+      cloudClient.commit(coll);
+    }
+
+    List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
+    Collections.shuffle(slices, random());
+    Slice slice = null;
+    Replica replica = null;
+    for (Slice s : slices) {
+      slice = s;
+      for (Replica r : s.getReplicas()) {
+        if (s.getLeader() != r) {
+          replica = r;
+        }
+      }
+    }
+    String dataDir = getDataDir(replica);
+
+    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+    ArrayList<String> l = new ArrayList<>(liveNodes);
+    Collections.shuffle(l, random());
+    String targetNode = null;
+    for (String node : liveNodes) {
+      if (!replica.getNodeName().equals(node)) {
+        targetNode = node;
+        break;
+      }
+    }
+    assertNotNull(targetNode);
+
+    CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
+    moveReplica.process(cloudClient);
+
+    checkNumOfCores(cloudClient, replica.getNodeName(), 0);
+    checkNumOfCores(cloudClient, targetNode, 2);
+
+    waitForState("Wait for recovery finish failed",coll, clusterShape(2,2));
+    slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
+    boolean found = false;
+    for (Replica newReplica : slice.getReplicas()) {
+      if (getDataDir(newReplica).equals(dataDir)) {
+        found = true;
+      }
+    }
+    assertTrue(found);
+
+
+    // data dir is reused so replication will be skipped
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
+      SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
+      List<String> registryNames = manager.registryNames().stream()
+          .filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
+      for (String registry : registryNames) {
+        Map<String, Metric> metrics = manager.registry(registry).getMetrics();
+        Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
+        if (counter != null) {
+          assertEquals(0, counter.getCount());
+        }
+      }
+    }
+  }
+
+
+  private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, int expectedCores) throws IOException, SolrServerException {
+    assertEquals(nodeName + " does not have expected number of cores",expectedCores, getNumOfCores(cloudClient, nodeName));
+  }
+
+  private int getNumOfCores(CloudSolrClient cloudClient, String nodeName) throws IOException, SolrServerException {
+    try (HttpSolrClient coreclient = getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName))) {
+      CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
+      return status.getCoreStatus().size();
+    }
+  }
+
+  private String getDataDir(Replica replica) throws IOException, SolrServerException {
+    try (HttpSolrClient coreclient = getHttpSolrClient(replica.getBaseUrl())) {
+      CoreStatus status = CoreAdminRequest.getCoreStatus(replica.getCoreName(), coreclient);
+      return status.getDataDirectory();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java
new file mode 100644
index 0000000..6f7e717
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.zookeeper.KeeperException;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+// Collect useful operations for testing assigning properties to individual replicas
+// Could probably expand this to do something creative with getting random slices
+// and shards, but for now this will do.
+public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBase {
+
+  public static NamedList<Object> doPropertyAction(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
+    assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    for (int idx = 0; idx < paramsIn.length; idx += 2) {
+      params.set(paramsIn[idx], paramsIn[idx + 1]);
+    }
+    QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+    return client.request(request);
+  }
+
+  public static void verifyPropertyNotPresent(CloudSolrClient client, String collectionName, String replicaName,
+                                String property)
+      throws KeeperException, InterruptedException {
+    ClusterState clusterState = null;
+    Replica replica = null;
+    for (int idx = 0; idx < 300; ++idx) {
+      clusterState = client.getZkStateReader().getClusterState();
+      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
+      replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
+      if (replica == null) {
+        fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
+      }
+      if (StringUtils.isBlank(replica.getProperty(property))) return;
+      Thread.sleep(100);
+    }
+    fail("Property " + property + " not set correctly for collection/replica pair: " +
+        collectionName + "/" + replicaName + ". Replica props: " + replica.getProperties().toString() +
+        ". Cluster state is " + clusterState.toString());
+
+  }
+
+  // Wait for a replica-level property to reach the expected value,
+  // failing if it does not appear within the retry window.
+  public static void verifyPropertyVal(CloudSolrClient client, String collectionName,
+                         String replicaName, String property, String val)
+      throws InterruptedException, KeeperException {
+    Replica replica = null;
+    ClusterState clusterState = null;
+
+    for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
+      clusterState = client.getZkStateReader().getClusterState();
+      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
+      replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
+      if (replica == null) {
+        fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
+      }
+      if (StringUtils.equals(val, replica.getProperty(property))) return;
+      Thread.sleep(100);
+    }
+
+    fail("Property '" + property + "' with value " + replica.getProperty(property) +
+        " not set correctly for collection/replica pair: " + collectionName + "/" + replicaName + " property map is " +
+        replica.getProperties().toString() + ".");
+
+  }
+
+  // Verify that
+  // 1> the property is only set once across all the replicas in a slice, and
+  // 2> the property is balanced evenly across all the nodes hosting the collection
+  // (a worked example of the balance arithmetic follows this file's diff).
+  public static void verifyUniqueAcrossCollection(CloudSolrClient client, String collectionName,
+                                    String property) throws KeeperException, InterruptedException {
+    verifyUnique(client, collectionName, property, true);
+  }
+
+  public static void verifyUniquePropertyWithinCollection(CloudSolrClient client, String collectionName,
+                            String property) throws KeeperException, InterruptedException {
+    verifyUnique(client, collectionName, property, false);
+  }
+
+  public static void verifyUnique(CloudSolrClient client, String collectionName, String property, boolean balanced)
+      throws KeeperException, InterruptedException {
+
+    DocCollection col = null;
+    for (int idx = 0; idx < 300; ++idx) {
+      ClusterState clusterState = client.getZkStateReader().getClusterState();
+
+      col = clusterState.getCollection(collectionName);
+      if (col == null) {
+        fail("Could not find collection " + collectionName);
+      }
+      Map<String, Integer> counts = new HashMap<>();
+      Set<String> uniqueNodes = new HashSet<>();
+      boolean allSlicesHaveProp = true;
+      boolean badSlice = false;
+      for (Slice slice : col.getSlices()) {
+        boolean thisSliceHasProp = false;
+        int propCount = 0;
+        for (Replica replica : slice.getReplicas()) {
+          uniqueNodes.add(replica.getNodeName());
+          String propVal = replica.getProperty(property);
+          if (StringUtils.isNotBlank(propVal)) {
+            ++propCount;
+            if (counts.containsKey(replica.getNodeName()) == false) {
+              counts.put(replica.getNodeName(), 0);
+            }
+            int count = counts.get(replica.getNodeName());
+            thisSliceHasProp = true;
+            counts.put(replica.getNodeName(), count + 1);
+          }
+        }
+        badSlice = (propCount > 1) ? true : badSlice;
+        allSlicesHaveProp = allSlicesHaveProp ? thisSliceHasProp : allSlicesHaveProp;
+      }
+      if (balanced == false && badSlice == false) {
+        return;
+      }
+      if (allSlicesHaveProp && balanced) {
+        // Check that the properties are evenly distributed.
+        int minProps = col.getSlices().size() / uniqueNodes.size();
+        int maxProps = minProps;
+
+        if (col.getSlices().size() % uniqueNodes.size() > 0) {
+          ++maxProps;
+        }
+        boolean doSleep = false;
+        for (Map.Entry<String, Integer> ent : counts.entrySet()) {
+          if (ent.getValue() != minProps && ent.getValue() != maxProps) {
+            doSleep = true;
+          }
+        }
+
+        if (doSleep == false) {
+          assertTrue("We really shouldn't be calling this if there is no node with the property " + property,
+              counts.size() > 0);
+          return;
+        }
+      }
+      Thread.sleep(100);
+    }
+    fail("Collection " + collectionName + " does not have roles evenly distributed. Collection is: " + col.toString());
+  }
+
+}

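The balance check in verifyUnique reduces to integer arithmetic: with S slices,
each carrying the property exactly once, spread over N distinct nodes, every node
must hold either S / N instances or, when S % N > 0, S / N + 1. As a worked
example with illustrative numbers, 10 slices on 4 nodes give minProps = 10 / 4 = 2
and, because 10 % 4 > 0, maxProps = 3; any node counting anything other than 2 or
3 triggers another 100 ms sleep-and-retry cycle.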

[06/41] lucene-solr:jira/solr-11702: SOLR-11737: Add kmeans Stream Evaluator to support kmeans clustering

Posted by da...@apache.org.
SOLR-11737: Add kmeans Stream Evaluator to support kmeans clustering


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a08f7127
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a08f7127
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a08f7127

Branch: refs/heads/jira/solr-11702
Commit: a08f71279ca79113f41a0ae1f954931195ebba41
Parents: d99bfa4
Author: Joel Bernstein <jb...@apache.org>
Authored: Mon Jan 15 14:50:17 2018 -0500
Committer: Joel Bernstein <jb...@apache.org>
Committed: Mon Jan 15 14:50:34 2018 -0500

----------------------------------------------------------------------
 .../org/apache/solr/handler/StreamHandler.java  |   9 +
 .../client/solrj/io/eval/ColumnAtEvaluator.java |  55 ++++++
 .../solrj/io/eval/FeatureSelectEvaluator.java   |  93 ++++++++++
 .../solrj/io/eval/GetCentroidsEvaluator.java    |  55 ++++++
 .../solrj/io/eval/GetClusterEvaluator.java      |  64 +++++++
 .../client/solrj/io/eval/KmeansEvaluator.java   | 135 ++++++++++++++
 .../client/solrj/io/eval/RowAtEvaluator.java    |  56 ++++++
 .../solrj/io/eval/SetColumnLabelsEvaluator.java |  47 +++++
 .../solrj/io/eval/SetRowLabelsEvaluator.java    |  47 +++++
 .../solrj/io/eval/TermVectorsEvaluator.java     |  13 +-
 .../solrj/io/eval/TopFeaturesEvaluator.java     | 112 +++++++++++
 .../client/solrj/io/eval/UnitEvaluator.java     |   5 +-
 .../solr/client/solrj/io/stream/LetStream.java  |  28 ++-
 .../solrj/io/stream/StreamExpressionTest.java   | 186 ++++++++++++++++++-
 14 files changed, 897 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
index ee3a17b..206136c 100644
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
@@ -296,6 +296,15 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
         .withFunctionName("getColumnLabels", GetColumnLabelsEvaluator.class)
         .withFunctionName("getRowLabels", GetRowLabelsEvaluator.class)
         .withFunctionName("getAttribute", GetAttributeEvaluator.class)
+        .withFunctionName("kmeans", KmeansEvaluator.class)
+        .withFunctionName("getCentroids", GetCentroidsEvaluator.class)
+        .withFunctionName("getCluster", GetClusterEvaluator.class)
+        .withFunctionName("topFeatures", TopFeaturesEvaluator.class)
+        .withFunctionName("featureSelect", FeatureSelectEvaluator.class)
+        .withFunctionName("rowAt", RowAtEvaluator.class)
+        .withFunctionName("colAt", ColumnAtEvaluator.class)
+        .withFunctionName("setColumnLabels", SetColumnLabelsEvaluator.class)
+        .withFunctionName("setRowLabels", SetRowLabelsEvaluator.class)
 
         // Boolean Stream Evaluators
 

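Taken together, the evaluators registered above form a small clustering pipeline:
build a matrix, label its rows, cluster it, and unpack the result. A minimal
sketch of such a streaming expression follows; the data, labels, and k value are
illustrative, and the matrix() and array() helpers are assumed to be available
alongside the functions added here:

    let(a=matrix(array(1, 1), array(1.5, 2), array(8, 8), array(9, 9)),
        b=setRowLabels(a, array("doc1", "doc2", "doc3", "doc4")),
        c=kmeans(b, 2),
        d=getCentroids(c),
        e=getCluster(c, 0),
        f=rowAt(d, 0))

kmeans returns the ClusterTuple defined below; getCentroids and getCluster turn it
back into labeled matrices, which rowAt and colAt can index, while featureSelect
and topFeatures reduce term-vector matrices to selected columns.
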
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ColumnAtEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ColumnAtEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ColumnAtEvaluator.java
new file mode 100644
index 0000000..5714096
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ColumnAtEvaluator.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import java.util.List;
+import java.util.ArrayList;
+
+public class ColumnAtEvaluator extends RecursiveObjectEvaluator implements TwoValueWorker {
+  protected static final long serialVersionUID = 1L;
+
+  public ColumnAtEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+
+    if(2 != containedEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting exactly 2 values but found %d",expression,containedEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Object doWork(Object value1, Object value2) throws IOException {
+
+    if(value1 instanceof Matrix) {
+      Matrix matrix = (Matrix) value1;
+      Number index = (Number) value2;
+      double[][] data = matrix.getData();
+      List<Number> list = new ArrayList();
+      for(double[] row : data) {
+        list.add(row[index.intValue()]);
+      }
+      return list;
+    } else {
+      throw new IOException("The rowAt function expects a matrix as the first parameter");
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/FeatureSelectEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/FeatureSelectEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/FeatureSelectEvaluator.java
new file mode 100644
index 0000000..b3c06d8
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/FeatureSelectEvaluator.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Locale;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import java.util.List;
+import java.util.Set;
+import java.util.ArrayList;
+
+public class FeatureSelectEvaluator extends RecursiveObjectEvaluator implements TwoValueWorker {
+  protected static final long serialVersionUID = 1L;
+
+  public FeatureSelectEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+
+    if(2 != containedEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting exactly 2 values but found %d",expression,containedEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Object doWork(Object value1, Object value2) throws IOException {
+
+    if(value1 instanceof Matrix) {
+      Matrix matrix = (Matrix) value1;
+      double[][] data = matrix.getData();
+
+      List<String> labels = matrix.getColumnLabels();
+      Set<String> features = new HashSet();
+      loadFeatures(value2, features);
+
+      List<String> newColumnLabels = new ArrayList();
+
+      for(String label : labels) {
+        if(features.contains(label)) {
+         newColumnLabels.add(label);
+        }
+      }
+
+      double[][] selectFeatures = new double[data.length][newColumnLabels.size()];
+
+      for(int i=0; i<data.length; i++) {
+        double[] currentRow = data[i];
+        double[] newRow = new double[newColumnLabels.size()];
+
+        int index = -1;
+        for(int l=0; l<currentRow.length; l++) {
+          String label = labels.get(l);
+          if(features.contains(label)) {
+            newRow[++index] = currentRow[l];
+          }
+        }
+        selectFeatures[i] = newRow;
+      }
+
+      Matrix newMatrix = new Matrix(selectFeatures);
+      newMatrix.setRowLabels(matrix.getRowLabels());
+      newMatrix.setColumnLabels(newColumnLabels);
+      return newMatrix;
+    } else {
+      throw new IOException("The featureSelect function expects a matrix as a parameter");
+    }
+  }
+
+  private void loadFeatures(Object o, Set<String> features) {
+    List list = (List)o;
+    for(Object v : list) {
+      if(v instanceof List) {
+        loadFeatures(v, features);
+      } else {
+        features.add((String)v);
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/GetCentroidsEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/GetCentroidsEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/GetCentroidsEvaluator.java
new file mode 100644
index 0000000..e55263d
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/GetCentroidsEvaluator.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import java.util.List;
+
+import org.apache.commons.math3.ml.clustering.CentroidCluster;
+import org.apache.commons.math3.ml.clustering.Clusterable;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class GetCentroidsEvaluator extends RecursiveObjectEvaluator implements OneValueWorker {
+  private static final long serialVersionUID = 1;
+
+  public GetCentroidsEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
+    super(expression, factory);
+  }
+
+  @Override
+  public Object doWork(Object value) throws IOException {
+    if(!(value instanceof KmeansEvaluator.ClusterTuple)){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - found type %s for value, expecting a clustering result",toExpression(constructingFactory), value.getClass().getSimpleName()));
+    } else {
+      KmeansEvaluator.ClusterTuple clusterTuple = (KmeansEvaluator.ClusterTuple)value;
+      List<CentroidCluster<KmeansEvaluator.ClusterPoint>> clusters = clusterTuple.getClusters();
+      double[][] data = new double[clusters.size()][];
+      for(int i=0; i<clusters.size(); i++) {
+        CentroidCluster<KmeansEvaluator.ClusterPoint> centroidCluster = clusters.get(i);
+        Clusterable clusterable = centroidCluster.getCenter();
+        data[i] = clusterable.getPoint();
+      }
+      Matrix centroids = new Matrix(data);
+      centroids.setColumnLabels(clusterTuple.getColumnLabels());
+      return centroids;
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/GetClusterEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/GetClusterEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/GetClusterEvaluator.java
new file mode 100644
index 0000000..903670d
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/GetClusterEvaluator.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import java.util.List;
+import java.util.ArrayList;
+
+import org.apache.commons.math3.ml.clustering.CentroidCluster;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class GetClusterEvaluator extends RecursiveObjectEvaluator implements TwoValueWorker {
+  private static final long serialVersionUID = 1;
+
+  public GetClusterEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
+    super(expression, factory);
+  }
+
+  @Override
+  public Object doWork(Object value1, Object value2) throws IOException {
+    if(!(value1 instanceof KmeansEvaluator.ClusterTuple)){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - found type %s for value, expecting a cluster result.",toExpression(constructingFactory), value1.getClass().getSimpleName()));
+    } else {
+
+      KmeansEvaluator.ClusterTuple clusterTuple = (KmeansEvaluator.ClusterTuple)value1;
+      List<CentroidCluster<KmeansEvaluator.ClusterPoint>> clusters = clusterTuple.getClusters();
+
+      Number index = (Number)value2;
+      CentroidCluster cluster = clusters.get(index.intValue());
+      List points = cluster.getPoints();
+      List<String> rowLabels = new ArrayList();
+      double[][] data = new double[points.size()][];
+
+      for(int i=0; i<points.size(); i++) {
+        KmeansEvaluator.ClusterPoint p = (KmeansEvaluator.ClusterPoint)points.get(i);
+        data[i] = p.getPoint();
+        rowLabels.add(p.getId());
+      }
+
+      Matrix matrix = new Matrix(data);
+      matrix.setRowLabels(rowLabels);
+      matrix.setColumnLabels(clusterTuple.getColumnLabels());
+      return matrix;
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/KmeansEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/KmeansEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/KmeansEvaluator.java
new file mode 100644
index 0000000..410d8bb
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/KmeansEvaluator.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+
+
+import org.apache.commons.math3.ml.clustering.CentroidCluster;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.commons.math3.ml.clustering.Clusterable;
+import org.apache.commons.math3.ml.clustering.KMeansPlusPlusClusterer;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class KmeansEvaluator extends RecursiveObjectEvaluator implements ManyValueWorker {
+  protected static final long serialVersionUID = 1L;
+
+  public KmeansEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+  }
+
+  @Override
+  public Object doWork(Object... values) throws IOException {
+
+    if(values.length < 2) {
+      throw new IOException("kmeans expects atleast two parameters a Matrix of observations and k");
+    }
+
+    Matrix matrix = null;
+    int k = 0;
+    int maxIterations = 1000;
+
+    if(values[0] instanceof Matrix) {
+      matrix = (Matrix)values[0];
+    } else {
+      throw new IOException("The first parameter for kmeans should be the observation matrix.");
+    }
+
+    if(values[1] instanceof Number) {
+      k = ((Number)values[1]).intValue();
+    } else {
+      throw new IOException("The second parameter for kmeans should be k.");
+    }
+
+    if(values.length == 3) {
+      maxIterations = ((Number)values[2]).intValue();
+    }
+
+    KMeansPlusPlusClusterer<ClusterPoint> kmeans = new KMeansPlusPlusClusterer(k, maxIterations);
+    List<ClusterPoint> points = new ArrayList();
+    double[][] data = matrix.getData();
+
+    List<String> ids = matrix.getRowLabels();
+
+    for(int i=0; i<data.length; i++) {
+      double[] vec = data[i];
+      points.add(new ClusterPoint(ids.get(i), vec));
+    }
+
+    Map fields = new HashMap();
+
+    fields.put("k", k);
+    fields.put("distance", "euclidean");
+    fields.put("maxIterations", maxIterations);
+
+    return new ClusterTuple(fields, kmeans.cluster(points), matrix.getColumnLabels());
+  }
+
+  public static class ClusterPoint implements Clusterable {
+
+    private double[] point;
+    private String id;
+
+    public ClusterPoint(String id, double[] point) {
+      this.id = id;
+      this.point = point;
+    }
+
+    public double[] getPoint() {
+      return this.point;
+    }
+
+    public String getId() {
+      return this.id;
+    }
+  }
+
+  public static class ClusterTuple extends Tuple {
+
+    private List<String> columnLabels;
+    private List<CentroidCluster<ClusterPoint>> clusters;
+
+    public ClusterTuple(Map fields,
+                        List<CentroidCluster<ClusterPoint>> clusters,
+                        List<String> columnLabels) {
+      super(fields);
+      this.clusters = clusters;
+      this.columnLabels = columnLabels;
+    }
+
+    public List<String> getColumnLabels() {
+      return this.columnLabels;
+    }
+
+    public List<CentroidCluster<ClusterPoint>> getClusters() {
+      return this.clusters;
+    }
+
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/RowAtEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/RowAtEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/RowAtEvaluator.java
new file mode 100644
index 0000000..982cfbb
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/RowAtEvaluator.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import java.util.List;
+import java.util.ArrayList;
+
+public class RowAtEvaluator extends RecursiveObjectEvaluator implements TwoValueWorker {
+  protected static final long serialVersionUID = 1L;
+
+  public RowAtEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+
+    if(2 != containedEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting exactly 2 values but found %d",expression,containedEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Object doWork(Object value1, Object value2) throws IOException {
+
+    if(value1 instanceof Matrix) {
+      Matrix matrix = (Matrix) value1;
+      Number index = (Number) value2;
+      double[] row = matrix.getData()[index.intValue()];
+      List<Number> list = new ArrayList();
+      for(double d : row) {
+        list.add(d);
+      }
+
+      return list;
+    } else {
+      throw new IOException("The rowAt function expects a matrix as the first parameter");
+    }
+  }
+}
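
A hedged usage sketch in the style of the tests further down (the url and request plumbing are assumed to match those tests): rowAt takes a matrix and a zero-based row index and returns that row as a list of numbers.

    // Illustrative only: evaluates rowAt over the /stream handler; 'url' is a placeholder.
    String cexpr = "rowAt(matrix(array(1,2,3), array(4,5,6)), 1)";  // -> [4, 5, 6]
    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
    paramsLoc.set("expr", cexpr);
    paramsLoc.set("qt", "/stream");
    TupleStream solrStream = new SolrStream(url, paramsLoc);
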

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SetColumnLabelsEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SetColumnLabelsEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SetColumnLabelsEvaluator.java
new file mode 100644
index 0000000..1d589af
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SetColumnLabelsEvaluator.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.List;
+
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class SetColumnLabelsEvaluator extends RecursiveObjectEvaluator implements TwoValueWorker {
+  private static final long serialVersionUID = 1;
+
+  public SetColumnLabelsEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
+    super(expression, factory);
+  }
+
+  @Override
+  public Object doWork(Object value1, Object value2) throws IOException {
+    if(!(value1 instanceof Matrix)){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - found type %s for value, expecting a Matrix",toExpression(constructingFactory), value1.getClass().getSimpleName()));
+    } else if(!(value2 instanceof List)) {
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - found type %s for value, expecting an array of labels.",toExpression(constructingFactory), value2.getClass().getSimpleName()));
+    } else {
+      Matrix matrix = (Matrix)value1;
+      List<String> colLabels =  (List<String>)value2;
+      matrix.setColumnLabels(colLabels);
+      return matrix;
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SetRowLabelsEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SetRowLabelsEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SetRowLabelsEvaluator.java
new file mode 100644
index 0000000..66a59c8
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SetRowLabelsEvaluator.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.List;
+
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class SetRowLabelsEvaluator extends RecursiveObjectEvaluator implements TwoValueWorker {
+  private static final long serialVersionUID = 1;
+
+  public SetRowLabelsEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
+    super(expression, factory);
+  }
+
+  @Override
+  public Object doWork(Object value1, Object value2) throws IOException {
+    if(!(value1 instanceof Matrix)){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - found type %s for value, expecting a Matrix",toExpression(constructingFactory), value1.getClass().getSimpleName()));
+    } else if(!(value2 instanceof List)) {
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - found type %s for value, expecting an array of labels.",toExpression(constructingFactory), value2.getClass().getSimpleName()));
+    } else {
+      Matrix matrix = (Matrix)value1;
+      List<String> rowlabels =  (List<String>)value2;
+      matrix.setRowLabels(rowlabels);
+      return matrix;
+    }
+  }
+}
\ No newline at end of file
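
Taken together with getColumnLabels (exercised in the tests below), the two evaluators above let an expression attach labels to a matrix and read them back. A hedged sketch in the test style, expression only:

    // Attach column labels, then fetch them back; b evaluates to [c1, c2].
    String cexpr = "let(echo=true," +
        "               a=setColumnLabels(matrix(array(1, 2), array(3, 4)), array(c1, c2))," +
        "               b=getColumnLabels(a))";
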

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TermVectorsEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TermVectorsEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TermVectorsEvaluator.java
index 8bf050d..7c09712 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TermVectorsEvaluator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TermVectorsEvaluator.java
@@ -38,6 +38,7 @@ public class TermVectorsEvaluator extends RecursiveObjectEvaluator implements Ma
   private int minTermLength = 3;
   private double minDocFreq = .05; // 5% of the docs min
   private double maxDocFreq = .5;  // 50% of the docs max
+  private String[] excludes;
 
   public TermVectorsEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
     super(expression, factory);
@@ -57,6 +58,8 @@ public class TermVectorsEvaluator extends RecursiveObjectEvaluator implements Ma
         if (maxDocFreq < 0 || maxDocFreq > 1) {
           throw new IOException("Doc frequency percentage must be between 0 and 1");
         }
+      } else if(namedParam.getName().equals("exclude")) {
+        this.excludes = namedParam.getParameter().toString().split(",");
       } else {
         throw new IOException("Unexpected named parameter:" + namedParam.getName());
       }
@@ -100,6 +103,7 @@ public class TermVectorsEvaluator extends RecursiveObjectEvaluator implements Ma
         String id = tuple.getString("id");
         rowLabels.add(id);
 
+        OUTER:
         for (String term : terms) {
 
           if (term.length() < minTermLength) {
@@ -107,6 +111,14 @@ public class TermVectorsEvaluator extends RecursiveObjectEvaluator implements Ma
             continue;
           }
 
+          if(excludes != null) {
+            for (String exclude : excludes) {
+              if (term.indexOf(exclude) > -1) {
+                continue OUTER;
+              }
+            }
+          }
+
           if (!docTerms.contains(term)) {
             docTerms.add(term);
             if (docFreqs.containsKey(term)) {
@@ -134,7 +146,6 @@ public class TermVectorsEvaluator extends RecursiveObjectEvaluator implements Ma
           it.remove();
         }
       }
-
       int totalTerms = docFreqs.size();
       Set<String> keys = docFreqs.keySet();
       features.addAll(keys);
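
Note that the exclude check above is a substring test (term.indexOf(exclude) > -1), so exclude=jim drops every term containing "jim", not just the exact token. A minimal sketch of that matching logic in isolation:

    // Substring-based exclusion, mirroring the OUTER loop above.
    String[] excludes = {"jim"};
    for (String term : new String[]{"jim", "jimmy", "jam"}) {
      boolean dropped = false;
      for (String exclude : excludes) {
        if (term.indexOf(exclude) > -1) { dropped = true; break; }
      }
      System.out.println(term + " dropped=" + dropped);  // jim and jimmy drop, jam survives
    }
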

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TopFeaturesEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TopFeaturesEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TopFeaturesEvaluator.java
new file mode 100644
index 0000000..e2100b1
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TopFeaturesEvaluator.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.TreeSet;
+
+public class TopFeaturesEvaluator extends RecursiveObjectEvaluator implements TwoValueWorker {
+  protected static final long serialVersionUID = 1L;
+
+  public TopFeaturesEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+
+    if(2 != containedEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting exactly 2 values but found %d",expression,containedEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Object doWork(Object value1, Object value2) throws IOException {
+
+    int k = ((Number)value2).intValue();
+
+    if(value1 instanceof Matrix) {
+
+      Matrix matrix = (Matrix) value1;
+      List<String> features = matrix.getColumnLabels();
+
+      if(features == null) {
+        throw new IOException("Matrix column labels cannot be null for topFeatures function.");
+      }
+
+      double[][] data = matrix.getData();
+      List<List<String>> topFeatures = new ArrayList();
+
+      for(int i=0; i<data.length; i++) {
+        double[] row = data[i];
+        List<String> featuresRow = new ArrayList();
+        List<Integer> indexes = getMaxIndexes(row, k);
+        for(int index : indexes) {
+          featuresRow.add(features.get(index));
+        }
+        topFeatures.add(featuresRow);
+      }
+
+      return topFeatures;
+    }  else {
+      throw new IOException("The topFeatures function expects a matrix as the first parameter");
+    }
+  }
+
+  private List<Integer> getMaxIndexes(double[] values, int k) {
+    TreeSet<Pair> set = new TreeSet();
+    for(int i=0; i<values.length; i++) {
+      set.add(new Pair(i, values[i]));
+      if(set.size() > k) {
+        set.pollFirst();
+      }
+    }
+
+    List<Integer> top = new ArrayList(k);
+    while(set.size() > 0) {
+      top.add(set.pollLast().getIndex());
+    }
+
+    return top;
+  }
+
+  public static class Pair implements Comparable<Pair> {
+
+    private int index;
+    private Double value;
+
+    public Pair(int index, Number value) {
+      this.index = index;
+      this.value = value.doubleValue();
+    }
+
+    public int compareTo(Pair pair) {
+      return value.compareTo(pair.value);
+    }
+
+    public int getIndex() {
+      return this.index;
+    }
+
+    public Number getValue() {
+      return value;
+    }
+  }
+}
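
The selection above is the standard bounded-heap idiom: keep at most k (index, value) pairs in a TreeSet and evict the current minimum while scanning the row. A standalone sketch of the same idea; note that, as with the Pair class above, entries with equal values collide in the TreeSet, so ties are silently dropped:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.TreeSet;

    public class TopKSketch {
      // Indexes of the k largest values, largest first.
      static List<Integer> topK(double[] values, int k) {
        TreeSet<double[]> set = new TreeSet<>((a, b) -> Double.compare(a[1], b[1]));
        for (int i = 0; i < values.length; i++) {
          set.add(new double[]{i, values[i]});
          if (set.size() > k) {
            set.pollFirst();  // evict the smallest of the k+1 candidates
          }
        }
        List<Integer> top = new ArrayList<>(k);
        while (!set.isEmpty()) {
          top.add((int) set.pollLast()[0]);
        }
        return top;
      }

      public static void main(String[] args) {
        System.out.println(topK(new double[]{1, 2, 3}, 1));  // prints [2]
      }
    }
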

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/UnitEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/UnitEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/UnitEvaluator.java
index 8be990d..16d72ae 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/UnitEvaluator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/UnitEvaluator.java
@@ -53,7 +53,10 @@ public class UnitEvaluator extends RecursiveObjectEvaluator implements OneValueW
         unitData[i] = unitRow;
       }
 
-      return new Matrix(unitData);
+      Matrix m = new Matrix(unitData);
+      m.setRowLabels(matrix.getRowLabels());
+      m.setColumnLabels(matrix.getColumnLabels());
+      return m;
     } else if(value instanceof List) {
       List<Number> values = (List<Number>)value;
       double[] doubles = new double[values.size()];
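
For reference, unit() scales each row of the matrix to unit length. The diff context does not show how unitRow is computed, so the following is an assumption based on standard L2 normalization, using Commons Math only for the norm:

    // Assumed row normalization: divide each row by its L2 norm (a zero row would need a guard).
    double[][] unitData = new double[data.length][];
    for (int i = 0; i < data.length; i++) {
      double norm = new org.apache.commons.math3.linear.ArrayRealVector(data[i]).getNorm();
      double[] unitRow = new double[data[i].length];
      for (int j = 0; j < data[i].length; j++) {
        unitRow[j] = data[i][j] / norm;
      }
      unitData[i] = unitRow;
    }
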

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java
index ce883ad..8bb12a5 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java
@@ -22,6 +22,8 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.HashSet;
+
 
 import org.apache.solr.client.solrj.io.Tuple;
 import org.apache.solr.client.solrj.io.comp.StreamComparator;
@@ -50,14 +52,25 @@ public class LetStream extends TupleStream implements Expressible {
 
     List<StreamExpressionNamedParameter> namedParams = factory.getNamedOperands(expression);
     //Get all the named params
-    boolean echo = false;
+    Set<String> echo = null;
+    boolean echoAll = false;
     String currentName = null;
     for(StreamExpressionParameter np : namedParams) {
       String name = ((StreamExpressionNamedParameter)np).getName();
       currentName = name;
 
       if(name.equals("echo")) {
-        echo = true;
+        echo = new HashSet();
+        String echoString = ((StreamExpressionNamedParameter) np).getParameter().toString().trim();
+        if(echoString.equalsIgnoreCase("true")) {
+          echoAll = true;
+        } else {
+          String[] echoVars = echoString.split(",");
+          for (String echoVar : echoVars) {
+            echo.add(echoVar.trim());
+          }
+        }
+
         continue;
       }
 
@@ -75,14 +88,21 @@ public class LetStream extends TupleStream implements Expressible {
       stream = factory.constructStream(streamExpressions.get(0));
     } else {
       StreamExpression tupleExpression = new StreamExpression("tuple");
-      if(!echo) {
+      if(!echoAll && echo == null) {
         tupleExpression.addParameter(new StreamExpressionNamedParameter(currentName, currentName));
       } else {
         Set<String> names = letParams.keySet();
         for(String name : names) {
-          tupleExpression.addParameter(new StreamExpressionNamedParameter(name, name));
+          if(echoAll) {
+            tupleExpression.addParameter(new StreamExpressionNamedParameter(name, name));
+          } else {
+            if(echo.contains(name)) {
+              tupleExpression.addParameter(new StreamExpressionNamedParameter(name, name));
+            }
+          }
         }
       }
+
       stream = factory.constructStream(tupleExpression);
     }
   }
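
A hedged sketch of the two echo forms this change enables (variable names are illustrative; the quoted form is assumed so that the comma list parses as a single named parameter):

    // echo=true emits every let variable; a comma-delimited list emits only the named ones.
    String echoAllExpr  = "let(echo=true, a=array(1), b=array(2), c=array(3))";     // tuple: a, b, c
    String echoSomeExpr = "let(echo=\"a,c\", a=array(1), b=array(2), c=array(3))";  // tuple: a, c
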

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a08f7127/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index 2a9df01..6f1e61f 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -6175,7 +6175,14 @@ public class StreamExpressionTest extends SolrCloudTestCase {
 
   @Test
   public void testMatrix() throws Exception {
-    String cexpr = "matrix(array(1, 2, 3), rev(array(4,5,6)))";
+    String cexpr = "let(echo=true," +
+        "               a=setColumnLabels(matrix(array(1, 2, 3), " +
+        "                                        rev(array(4,5,6)))," +
+        "                                 array(col1, col2, col3))," +
+        "               b=rowAt(a, 1)," +
+        "               c=colAt(a, 2)," +
+        "               d=getColumnLabels(a)," +
+        "               e=topFeatures(a, 1))";
     ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
     paramsLoc.set("expr", cexpr);
     paramsLoc.set("qt", "/stream");
@@ -6185,7 +6192,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     solrStream.setStreamContext(context);
     List<Tuple> tuples = getTuples(solrStream);
     assertTrue(tuples.size() == 1);
-    List<List<Number>> out = (List<List<Number>>)tuples.get(0).get("return-value");
+    List<List<Number>> out = (List<List<Number>>)tuples.get(0).get("a");
 
     List<Number> array1 = out.get(0);
     assertEquals(array1.size(), 3);
@@ -6198,6 +6205,31 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     assertEquals(array2.get(0).doubleValue(), 6.0, 0.0);
     assertEquals(array2.get(1).doubleValue(), 5.0, 0.0);
     assertEquals(array2.get(2).doubleValue(), 4.0, 0.0);
+
+    List<Number> row = (List<Number>)tuples.get(0).get("b");
+
+    assertEquals(row.size(), 3);
+    assertEquals(row.get(0).doubleValue(), 6.0, 0.0);
+    assertEquals(row.get(1).doubleValue(), 5.0, 0.0);
+    assertEquals(row.get(2).doubleValue(), 4.0, 0.0);
+
+    List<Number> col = (List<Number>)tuples.get(0).get("c");
+    assertEquals(col.size(), 2);
+    assertEquals(col.get(0).doubleValue(), 3.0, 0.0);
+    assertEquals(col.get(1).doubleValue(), 4.0, 0.0);
+
+    List<String> colLabels = (List<String>)tuples.get(0).get("d");
+    assertEquals(colLabels.size(), 3);
+    assertEquals(colLabels.get(0), "col1");
+    assertEquals(colLabels.get(1), "col2");
+    assertEquals(colLabels.get(2), "col3");
+
+    List<List<String>> features  = (List<List<String>>)tuples.get(0).get("e");
+    assertEquals(features.size(), 2);
+    assertEquals(features.get(0).size(), 1);
+    assertEquals(features.get(1).size(), 1);
+    assertEquals(features.get(0).get(0), "col3");
+    assertEquals(features.get(1).get(0), "col1");
   }
 
 
@@ -6784,6 +6816,78 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     assertEquals(docFreqs.get("world").intValue(), 1);
 
 
+    //Test exclude. This should drop off the term jim
+
+    cexpr = "let(echo=true," +
+        "        a=select(list(tuple(id=\"1\", text=\"hello world\"), " +
+        "                      tuple(id=\"2\", text=\"hello steve\"), " +
+        "                      tuple(id=\"3\", text=\"hello jim jim\"), " +
+        "                      tuple(id=\"4\", text=\"hello jack\")), id, analyze(text, test_t) as terms)," +
+        "        b=termVectors(a, exclude=jim, minDocFreq=0, maxDocFreq=1)," +
+        "        c=getRowLabels(b)," +
+        "        d=getColumnLabels(b)," +
+        "        e=getAttribute(b, docFreqs))";
+
+    paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    solrStream = new SolrStream(url, paramsLoc);
+    context = new StreamContext();
+    solrStream.setStreamContext(context);
+    tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    termVectors  = (List<List<Number>>)tuples.get(0).get("b");
+    assertEquals(termVectors.size(), 4);
+    termVector = termVectors.get(0);
+    assertEquals(termVector.size(), 4);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 1.916290731874155, 0.0);
+
+    termVector = termVectors.get(1);
+    assertEquals(termVector.size(), 4);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 1.916290731874155, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 0.0, 0.0);
+
+    termVector = termVectors.get(2);
+    assertEquals(termVector.size(), 4);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 0.0, 0.0);
+
+    termVector = termVectors.get(3);
+    assertEquals(termVector.size(), 4);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 1.916290731874155, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 0.0, 0.0);
+
+    rowLabels  = (List<String>)tuples.get(0).get("c");
+    assertEquals(rowLabels.size(), 4);
+    assertEquals(rowLabels.get(0), "1");
+    assertEquals(rowLabels.get(1), "2");
+    assertEquals(rowLabels.get(2), "3");
+    assertEquals(rowLabels.get(3), "4");
+
+    columnLabels  = (List<String>)tuples.get(0).get("d");
+    assertEquals(columnLabels.size(), 4);
+    assertEquals(columnLabels.get(0), "hello");
+    assertEquals(columnLabels.get(1), "jack");
+    assertEquals(columnLabels.get(2), "steve");
+    assertEquals(columnLabels.get(3), "world");
+
+    docFreqs  = (Map<String, Number>)tuples.get(0).get("e");
+
+    assertEquals(docFreqs.size(), 4);
+    assertEquals(docFreqs.get("hello").intValue(), 4);
+    assertEquals(docFreqs.get("jack").intValue(), 1);
+    assertEquals(docFreqs.get("steve").intValue(), 1);
+    assertEquals(docFreqs.get("world").intValue(), 1);
+
     //Test minDocFreq attribute at .5. This should eliminate all but the term hello
 
     cexpr = "let(echo=true," +
@@ -6884,6 +6988,84 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     assertTrue(out.get(5).intValue() == 6);
   }
 
+  @Test
+  public void testKmeans() throws Exception {
+    String cexpr = "let(echo=true," +
+        "               a=array(1,1,1,0,0,0)," +
+        "               b=array(1,1,1,0,0,0)," +
+        "               c=array(0,0,0,1,1,1)," +
+        "               d=array(0,0,0,1,1,1)," +
+        "               e=setRowLabels(matrix(a,b,c,d), " +
+        "                              array(doc1, doc2, doc3, doc4))," +
+        "               f=kmeans(e, 2)," +
+        "               g=getCluster(f, 0)," +
+        "               h=getCluster(f, 1)," +
+        "               i=getCentroids(f)," +
+        "               j=getRowLabels(g)," +
+        "               k=getRowLabels(h))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<List<Number>> cluster1 = (List<List<Number>>)tuples.get(0).get("g");
+    List<List<Number>> cluster2 = (List<List<Number>>)tuples.get(0).get("h");
+    List<List<Number>> centroids = (List<List<Number>>)tuples.get(0).get("i");
+    List<String> labels1 = (List<String>)tuples.get(0).get("j");
+    List<String> labels2 = (List<String>)tuples.get(0).get("k");
+
+    assertEquals(cluster1.size(), 2);
+    assertEquals(cluster2.size(), 2);
+    assertEquals(centroids.size(), 2);
+
+    //Assert that the docs are not in both clusters
+    assertTrue(!(labels1.contains("doc1") && labels2.contains("doc1")));
+    assertTrue(!(labels1.contains("doc2") && labels2.contains("doc2")));
+    assertTrue(!(labels1.contains("doc3") && labels2.contains("doc3")));
+    assertTrue(!(labels1.contains("doc4") && labels2.contains("doc4")));
+
+    //Assert that (doc1 and doc2) or (doc3 and doc4) are in labels1
+    assertTrue((labels1.contains("doc1") && labels1.contains("doc2")) ||
+              ((labels1.contains("doc3") && labels1.contains("doc4"))));
+
+    //Assert that (doc1 and doc2) or (doc3 and doc4) are in labels2
+    assertTrue((labels2.contains("doc1") && labels2.contains("doc2")) ||
+        ((labels2.contains("doc3") && labels2.contains("doc4"))));
+
+    if(labels1.contains("doc1")) {
+      assertEquals(centroids.get(0).get(0).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(1).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(2).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(3).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(4).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(5).doubleValue(), 0.0, 0.0);
+
+      assertEquals(centroids.get(1).get(0).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(1).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(2).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(3).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(4).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(5).doubleValue(), 1.0, 0.0);
+    } else {
+      assertEquals(centroids.get(0).get(0).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(1).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(2).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(3).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(4).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(5).doubleValue(), 1.0, 0.0);
+
+      assertEquals(centroids.get(1).get(0).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(1).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(2).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(3).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(4).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(5).doubleValue(), 0.0, 0.0);
+    }
+  }
 
   @Test
   public void testEBEMultiply() throws Exception {


[13/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to their own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java
deleted file mode 100644
index 154673a..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Test of the Collections API with the MiniSolrCloudCluster.
- */
-@LuceneTestCase.Slow
-public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private static final int numShards = 2;
-  private static final int numReplicas = 2;
-  private static final int maxShardsPerNode = 1;
-  private static final int nodeCount = 5;
-  private static final String configName = "solrCloudCollectionConfig";
-  private static final Map<String,String> collectionProperties  // ensure indexes survive core shutdown
-      = Collections.singletonMap("solr.directoryFactory", "solr.StandardDirectoryFactory");
-
-  @Override
-  public void setUp() throws Exception {
-    configureCluster(nodeCount).addConfig(configName, configset("cloud-minimal")).configure();
-    super.setUp();
-  }
-  
-  @Override
-  public void tearDown() throws Exception {
-    cluster.shutdown();
-    super.tearDown();
-  }
-
-  private void createCollection(String collectionName, String createNodeSet) throws Exception {
-    if (random().nextBoolean()) { // process asynchronously
-      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
-          .setMaxShardsPerNode(maxShardsPerNode)
-          .setCreateNodeSet(createNodeSet)
-          .setProperties(collectionProperties)
-          .processAndWait(cluster.getSolrClient(), 30);
-    }
-    else {
-      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
-          .setMaxShardsPerNode(maxShardsPerNode)
-          .setCreateNodeSet(createNodeSet)
-          .setProperties(collectionProperties)
-          .process(cluster.getSolrClient());
-    }
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish
-        (collectionName, cluster.getSolrClient().getZkStateReader(), true, true, 330);
-  }
-
-  @Test
-  public void testCollectionCreateSearchDelete() throws Exception {
-    final CloudSolrClient client = cluster.getSolrClient();
-    final String collectionName = "testcollection";
-
-    assertNotNull(cluster.getZkServer());
-    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
-    assertEquals(nodeCount, jettys.size());
-    for (JettySolrRunner jetty : jettys) {
-      assertTrue(jetty.isRunning());
-    }
-
-    // shut down a server
-    JettySolrRunner stoppedServer = cluster.stopJettySolrRunner(0);
-    assertTrue(stoppedServer.isStopped());
-    assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
-
-    // create a server
-    JettySolrRunner startedServer = cluster.startJettySolrRunner();
-    assertTrue(startedServer.isRunning());
-    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
-
-    // create collection
-    createCollection(collectionName, null);
-
-    // modify/query collection
-    new UpdateRequest().add("id", "1").commit(client, collectionName);
-    QueryResponse rsp = client.query(collectionName, new SolrQuery("*:*"));
-    assertEquals(1, rsp.getResults().getNumFound());
-
-    // remove a server not hosting any replicas
-    ZkStateReader zkStateReader = client.getZkStateReader();
-    zkStateReader.forceUpdateCollection(collectionName);
-    ClusterState clusterState = zkStateReader.getClusterState();
-    Map<String,JettySolrRunner> jettyMap = new HashMap<>();
-    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
-      String key = jetty.getBaseUrl().toString().substring((jetty.getBaseUrl().getProtocol() + "://").length());
-      jettyMap.put(key, jetty);
-    }
-    Collection<Slice> slices = clusterState.getCollection(collectionName).getSlices();
-    // track the servers not hosting replicas
-    for (Slice slice : slices) {
-      jettyMap.remove(slice.getLeader().getNodeName().replace("_solr", "/solr"));
-      for (Replica replica : slice.getReplicas()) {
-        jettyMap.remove(replica.getNodeName().replace("_solr", "/solr"));
-      }
-    }
-    assertTrue("Expected to find a node without a replica", jettyMap.size() > 0);
-    JettySolrRunner jettyToStop = jettyMap.entrySet().iterator().next().getValue();
-    jettys = cluster.getJettySolrRunners();
-    for (int i = 0; i < jettys.size(); ++i) {
-      if (jettys.get(i).equals(jettyToStop)) {
-        cluster.stopJettySolrRunner(i);
-        assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
-      }
-    }
-
-    // re-create a server (to restore the original node count)
-    startedServer = cluster.startJettySolrRunner(jettyToStop);
-    assertTrue(startedServer.isRunning());
-    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
-
-    CollectionAdminRequest.deleteCollection(collectionName).process(client);
-    AbstractDistribZkTestBase.waitForCollectionToDisappear
-        (collectionName, client.getZkStateReader(), true, true, 330);
-
-    // create it again
-    createCollection(collectionName, null);
-
-    // check that there's no left-over state
-    assertEquals(0, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
-
-    // modify/query collection
-    new UpdateRequest().add("id", "1").commit(client, collectionName);
-    assertEquals(1, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
-  }
-
-  @Test
-  public void testCollectionCreateWithoutCoresThenDelete() throws Exception {
-
-    final String collectionName = "testSolrCloudCollectionWithoutCores";
-    final CloudSolrClient client = cluster.getSolrClient();
-
-    assertNotNull(cluster.getZkServer());
-    assertFalse(cluster.getJettySolrRunners().isEmpty());
-
-    // create collection
-    createCollection(collectionName, OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY);
-
-    // check the collection's corelessness
-    int coreCount = 0;
-    DocCollection docCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
-    for (Map.Entry<String,Slice> entry : docCollection.getSlicesMap().entrySet()) {
-      coreCount += entry.getValue().getReplicasMap().entrySet().size();
-    }
-    assertEquals(0, coreCount);
-
-    // delete the collection
-    CollectionAdminRequest.deleteCollection(collectionName).process(client);
-    AbstractDistribZkTestBase.waitForCollectionToDisappear
-        (collectionName, client.getZkStateReader(), true, true, 330);
-  }
-
-  @Test
-  public void testStopAllStartAll() throws Exception {
-
-    final String collectionName = "testStopAllStartAllCollection";
-    final CloudSolrClient client = cluster.getSolrClient();
-
-    assertNotNull(cluster.getZkServer());
-    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
-    assertEquals(nodeCount, jettys.size());
-    for (JettySolrRunner jetty : jettys) {
-      assertTrue(jetty.isRunning());
-    }
-
-    final SolrQuery query = new SolrQuery("*:*");
-    final SolrInputDocument doc = new SolrInputDocument();
-
-    // create collection
-    createCollection(collectionName, null);
-
-    ZkStateReader zkStateReader = client.getZkStateReader();
-
-    // modify collection
-    final int numDocs = 1 + random().nextInt(10);
-    for (int ii = 1; ii <= numDocs; ++ii) {
-      doc.setField("id", ""+ii);
-      client.add(collectionName, doc);
-      if (ii*2 == numDocs) client.commit(collectionName);
-    }
-    client.commit(collectionName);
-
-    // query collection
-    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
-
-    // the test itself
-    zkStateReader.forceUpdateCollection(collectionName);
-    final ClusterState clusterState = zkStateReader.getClusterState();
-
-    final Set<Integer> leaderIndices = new HashSet<>();
-    final Set<Integer> followerIndices = new HashSet<>();
-    {
-      final Map<String,Boolean> shardLeaderMap = new HashMap<>();
-      for (final Slice slice : clusterState.getCollection(collectionName).getSlices()) {
-        for (final Replica replica : slice.getReplicas()) {
-          shardLeaderMap.put(replica.getNodeName().replace("_solr", "/solr"), Boolean.FALSE);
-        }
-        shardLeaderMap.put(slice.getLeader().getNodeName().replace("_solr", "/solr"), Boolean.TRUE);
-      }
-      for (int ii = 0; ii < jettys.size(); ++ii) {
-        final URL jettyBaseUrl = jettys.get(ii).getBaseUrl();
-        final String jettyBaseUrlString = jettyBaseUrl.toString().substring((jettyBaseUrl.getProtocol() + "://").length());
-        final Boolean isLeader = shardLeaderMap.get(jettyBaseUrlString);
-        if (Boolean.TRUE.equals(isLeader)) {
-          leaderIndices.add(ii);
-        } else if (Boolean.FALSE.equals(isLeader)) {
-          followerIndices.add(ii);
-        } // else neither leader nor follower i.e. node without a replica (for our collection)
-      }
-    }
-    final List<Integer> leaderIndicesList = new ArrayList<>(leaderIndices);
-    final List<Integer> followerIndicesList = new ArrayList<>(followerIndices);
-
-    // first stop the followers (in no particular order)
-    Collections.shuffle(followerIndicesList, random());
-    for (Integer ii : followerIndicesList) {
-      if (!leaderIndices.contains(ii)) {
-        cluster.stopJettySolrRunner(jettys.get(ii));
-      }
-    }
-
-    // then stop the leaders (again in no particular order)
-    Collections.shuffle(leaderIndicesList, random());
-    for (Integer ii : leaderIndicesList) {
-      cluster.stopJettySolrRunner(jettys.get(ii));
-    }
-
-    // calculate restart order
-    final List<Integer> restartIndicesList = new ArrayList<>();
-    Collections.shuffle(leaderIndicesList, random());
-    restartIndicesList.addAll(leaderIndicesList);
-    Collections.shuffle(followerIndicesList, random());
-    restartIndicesList.addAll(followerIndicesList);
-    if (random().nextBoolean()) Collections.shuffle(restartIndicesList, random());
-
-    // and then restart jettys in that order
-    for (Integer ii : restartIndicesList) {
-      final JettySolrRunner jetty = jettys.get(ii);
-      if (!jetty.isRunning()) {
-        cluster.startJettySolrRunner(jetty);
-        assertTrue(jetty.isRunning());
-      }
-    }
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
-
-    zkStateReader.forceUpdateCollection(collectionName);
-
-    // re-query collection
-    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java
deleted file mode 100644
index 40a6e30..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.core.backup.BackupManager.*;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.cloud.hdfs.HdfsTestUtil;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.backup.BackupManager;
-import org.apache.solr.core.backup.repository.HdfsBackupRepository;
-import org.apache.solr.util.BadHdfsThreadsFilter;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the tests for HDFS integration for Solr backup/restore capability.
- */
-@ThreadLeakFilters(defaultFilters = true, filters = {
-    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
-})
-public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
-  public static final String SOLR_XML = "<solr>\n" +
-      "\n" +
-      "  <str name=\"shareSchema\">${shareSchema:false}</str>\n" +
-      "  <str name=\"configSetBaseDir\">${configSetBaseDir:configsets}</str>\n" +
-      "  <str name=\"coreRootDirectory\">${coreRootDirectory:.}</str>\n" +
-      "\n" +
-      "  <shardHandlerFactory name=\"shardHandlerFactory\" class=\"HttpShardHandlerFactory\">\n" +
-      "    <str name=\"urlScheme\">${urlScheme:}</str>\n" +
-      "    <int name=\"socketTimeout\">${socketTimeout:90000}</int>\n" +
-      "    <int name=\"connTimeout\">${connTimeout:15000}</int>\n" +
-      "  </shardHandlerFactory>\n" +
-      "\n" +
-      "  <solrcloud>\n" +
-      "    <str name=\"host\">127.0.0.1</str>\n" +
-      "    <int name=\"hostPort\">${hostPort:8983}</int>\n" +
-      "    <str name=\"hostContext\">${hostContext:solr}</str>\n" +
-      "    <int name=\"zkClientTimeout\">${solr.zkclienttimeout:30000}</int>\n" +
-      "    <bool name=\"genericCoreNodeNames\">${genericCoreNodeNames:true}</bool>\n" +
-      "    <int name=\"leaderVoteWait\">10000</int>\n" +
-      "    <int name=\"distribUpdateConnTimeout\">${distribUpdateConnTimeout:45000}</int>\n" +
-      "    <int name=\"distribUpdateSoTimeout\">${distribUpdateSoTimeout:340000}</int>\n" +
-      "  </solrcloud>\n" +
-      "  \n" +
-      "  <backup>\n" +
-      "    <repository  name=\"hdfs\" class=\"org.apache.solr.core.backup.repository.HdfsBackupRepository\"> \n" +
-      "      <str name=\"location\">${solr.hdfs.default.backup.path}</str>\n" +
-      "      <str name=\"solr.hdfs.home\">${solr.hdfs.home:}</str>\n" +
-      "      <str name=\"solr.hdfs.confdir\">${solr.hdfs.confdir:}</str>\n" +
-      "    </repository>\n" +
-      "  </backup>\n" +
-      "  \n" +
-      "</solr>\n";
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static MiniDFSCluster dfsCluster;
-  private static String hdfsUri;
-  private static FileSystem fs;
-
-  @BeforeClass
-  public static void setupClass() throws Exception {
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
-    hdfsUri = HdfsTestUtil.getURI(dfsCluster);
-    try {
-      URI uri = new URI(hdfsUri);
-      Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
-      conf.setBoolean("fs.hdfs.impl.disable.cache", true);
-      fs = FileSystem.get(uri, conf);
-
-      if (fs instanceof DistributedFileSystem) {
-        // Make sure dfs is not in safe mode
-        while (((DistributedFileSystem) fs).setSafeMode(SafeModeAction.SAFEMODE_GET, true)) {
-          log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
-          try {
-            Thread.sleep(5000);
-          } catch (InterruptedException e) {
-            Thread.interrupted();
-            // continue
-          }
-        }
-      }
-
-      fs.mkdirs(new org.apache.hadoop.fs.Path("/backup"));
-    } catch (IOException | URISyntaxException e) {
-      throw new RuntimeException(e);
-    }
-
-    System.setProperty("solr.hdfs.default.backup.path", "/backup");
-    System.setProperty("solr.hdfs.home", hdfsUri + "/solr");
-    useFactory("solr.StandardDirectoryFactory");
-
-    configureCluster(NUM_SHARDS)// nodes
-    .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
-    .withSolrXml(SOLR_XML)
-    .configure();
-  }
-
-  @AfterClass
-  public static void teardownClass() throws Exception {
-    System.clearProperty("solr.hdfs.home");
-    System.clearProperty("solr.hdfs.default.backup.path");
-    System.clearProperty("test.build.data");
-    System.clearProperty("test.cache.data");
-    IOUtils.closeQuietly(fs);
-    fs = null;
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
-  }
-
-  @Override
-  public String getCollectionName() {
-    return "hdfsbackuprestore";
-  }
-
-  @Override
-  public String getBackupRepoName() {
-    return "hdfs";
-  }
-
-  @Override
-  public String getBackupLocation() {
-    return null;
-  }
-
-  protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
-    String backupName = "configonlybackup";
-    CloudSolrClient solrClient = cluster.getSolrClient();
-
-    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
-        .setRepositoryName(getBackupRepoName())
-        .setIndexBackupStrategy(CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY);
-    backup.process(solrClient);
-
-    Map<String,String> params = new HashMap<>();
-    params.put("location", "/backup");
-    params.put("solr.hdfs.home", hdfsUri + "/solr");
-
-    HdfsBackupRepository repo = new HdfsBackupRepository();
-    repo.init(new NamedList<>(params));
-    BackupManager mgr = new BackupManager(repo, solrClient.getZkStateReader());
-
-    URI baseLoc = repo.createURI("/backup");
-
-    Properties props = mgr.readBackupProperties(baseLoc, backupName);
-    assertNotNull(props);
-    assertEquals(collectionName, props.getProperty(COLLECTION_NAME_PROP));
-    assertEquals(backupName, props.getProperty(BACKUP_NAME_PROP));
-    assertEquals(configName, props.getProperty(COLL_CONF));
-
-    DocCollection collectionState = mgr.readCollectionState(baseLoc, backupName, collectionName);
-    assertNotNull(collectionState);
-    assertEquals(collectionName, collectionState.getName());
-
-    URI configDirLoc = repo.resolve(baseLoc, backupName, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
-    assertTrue(repo.exists(configDirLoc));
-
-    Collection<String> expected = Arrays.asList(BACKUP_PROPS_FILE, ZK_STATE_DIR);
-    URI backupLoc = repo.resolve(baseLoc, backupName);
-    String[] dirs = repo.listAll(backupLoc);
-    for (String d : dirs) {
-      assertTrue(expected.contains(d));
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
deleted file mode 100644
index c0db46e..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.junit.BeforeClass;
-
-/**
- * This class implements the tests for local file-system integration for Solr backup/restore capability.
- * Note that the Solr backup/restore still requires a "shared" file-system. It's just that in this case
- * such file-system would be exposed via local file-system API.
- */
-public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
-  private static String backupLocation;
-
-  @BeforeClass
-  public static void setupClass() throws Exception {
-    configureCluster(NUM_SHARDS)// nodes
-        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
-        .configure();
-
-    boolean whitespacesInPath = random().nextBoolean();
-    if (whitespacesInPath) {
-      backupLocation = createTempDir("my backup").toAbsolutePath().toString();
-    } else {
-      backupLocation = createTempDir("mybackup").toAbsolutePath().toString();
-    }
-  }
-
-  @Override
-  public String getCollectionName() {
-    return "backuprestore";
-  }
-
-  @Override
-  public String getBackupRepoName() {
-    return null;
-  }
-
-  @Override
-  public String getBackupLocation() {
-    return backupLocation;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java b/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java
deleted file mode 100644
index f654e8f..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-@Slow
-public class TestReplicaProperties extends ReplicaPropertiesBase {
-
-  public static final String COLLECTION_NAME = "testcollection";
-
-  public TestReplicaProperties() {
-    schemaString = "schema15.xml";      // we need a string id
-    sliceCount = 2;
-  }
-
-  @Test
-  @ShardsFixed(num = 4)
-  public void test() throws Exception {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      // Mix up a bunch of different combinations of shards and replicas in order to exercise boundary cases.
-      // shards, replicationfactor, maxreplicaspernode
-      int shards = random().nextInt(7);
-      if (shards < 2) shards = 2;
-      int rFactor = random().nextInt(4);
-      if (rFactor < 2) rFactor = 2;
-      createCollection(null, COLLECTION_NAME, shards, rFactor, shards * rFactor + 1, client, null, "conf1");
-    }
-
-    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
-    waitForRecoveriesToFinish(COLLECTION_NAME, false);
-
-    listCollection();
-
-    clusterAssignPropertyTest();
-  }
-
-  private void listCollection() throws IOException, SolrServerException {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.LIST.toString());
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      List<String> collections = (List<String>) rsp.get("collections");
-      assertTrue("control_collection was not found in list", collections.contains("control_collection"));
-      assertTrue(DEFAULT_COLLECTION + " was not found in list", collections.contains(DEFAULT_COLLECTION));
-      assertTrue(COLLECTION_NAME + " was not found in list", collections.contains(COLLECTION_NAME));
-    }
-  }
-
-
-  private void clusterAssignPropertyTest() throws Exception {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      client.connect();
-      try {
-        doPropertyAction(client,
-            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-            "property", "preferredLeader");
-      } catch (SolrException se) {
-        assertTrue("Should have seen missing required parameter 'collection' error",
-            se.getMessage().contains("Missing required parameter: collection"));
-      }
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-          "collection", COLLECTION_NAME,
-          "property", "preferredLeader");
-
-      verifyUniqueAcrossCollection(client, COLLECTION_NAME, "preferredleader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-          "collection", COLLECTION_NAME,
-          "property", "property.newunique",
-          "shardUnique", "true");
-      verifyUniqueAcrossCollection(client, COLLECTION_NAME, "property.newunique");
-
-      try {
-        doPropertyAction(client,
-            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-            "collection", COLLECTION_NAME,
-            "property", "whatever",
-            "shardUnique", "false");
-        fail("Should have thrown an exception here.");
-      } catch (SolrException se) {
-        assertTrue("Should have gotten a specific error message here",
-            se.getMessage().contains("Balancing properties amongst replicas in a slice requires that the " +
-                "property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'"));
-      }
-      // Should be able to set non-unique-per-slice values in several places.
-      Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
-      List<String> sliceList = new ArrayList<>(slices.keySet());
-      String c1_s1 = sliceList.get(0);
-      List<String> replicasList = new ArrayList<>(slices.get(c1_s1).getReplicasMap().keySet());
-      String c1_s1_r1 = replicasList.get(0);
-      String c1_s1_r2 = replicasList.get(1);
-
-      addProperty(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "bogus1",
-          "property.value", "true");
-
-      addProperty(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r2,
-          "property", "property.bogus1",
-          "property.value", "whatever");
-
-      try {
-        doPropertyAction(client,
-            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-            "collection", COLLECTION_NAME,
-            "property", "bogus1",
-            "shardUnique", "false");
-        fail("Should have thrown parameter error here");
-      } catch (SolrException se) {
-        assertTrue("Should have caught specific exception ",
-            se.getMessage().contains("Balancing properties amongst replicas in a slice requires that the property be " +
-                "pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'"));
-      }
-
-      // Should have no effect despite the "shardUnique" param being set.
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-          "collection", COLLECTION_NAME,
-          "property", "property.bogus1",
-          "shardUnique", "true");
-
-      verifyPropertyVal(client, COLLECTION_NAME,
-          c1_s1_r1, "bogus1", "true");
-      verifyPropertyVal(client, COLLECTION_NAME,
-          c1_s1_r2, "property.bogus1", "whatever");
-
-      // At this point we've assigned a preferred leader. Make it happen and check that all the nodes that are
-      // leaders _also_ have the preferredLeader property set.
-
-
-      NamedList<Object> res = doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.REBALANCELEADERS.toString(),
-          "collection", COLLECTION_NAME);
-
-      verifyLeaderAssignment(client, COLLECTION_NAME);
-
-    }
-  }
-
-  private void verifyLeaderAssignment(CloudSolrClient client, String collectionName)
-      throws InterruptedException, KeeperException {
-    String lastFailMsg = "";
-    for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
-      lastFailMsg = "";
-      ClusterState clusterState = client.getZkStateReader().getClusterState();
-      for (Slice slice : clusterState.getCollection(collectionName).getSlices()) {
-        Boolean foundLeader = false;
-        Boolean foundPreferred = false;
-        for (Replica replica : slice.getReplicas()) {
-          Boolean isLeader = replica.getBool("leader", false);
-          Boolean isPreferred = replica.getBool("property.preferredleader", false);
-          if (isLeader != isPreferred) {
-            lastFailMsg = "Replica should NOT have preferredLeader != leader. Preferred: " + isPreferred.toString() +
-                " leader is " + isLeader.toString();
-          }
-          if (foundLeader && isLeader) {
-            lastFailMsg = "There should only be a single leader in _any_ shard! Replica " + replica.getName() +
-                " is the second leader in slice " + slice.getName();
-          }
-          if (foundPreferred && isPreferred) {
-            lastFailMsg = "There should only be a single preferredLeader in _any_ shard! Replica " + replica.getName() +
-                " is the second preferredLeader in slice " + slice.getName();
-          }
-          foundLeader = foundLeader ? foundLeader : isLeader;
-          foundPreferred = foundPreferred ? foundPreferred : isPreferred;
-        }
-      }
-      if (lastFailMsg.length() == 0) return;
-      Thread.sleep(100);
-    }
-    fail(lastFailMsg);
-  }
-
-  private void addProperty(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
-    assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    for (int idx = 0; idx < paramsIn.length; idx += 2) {
-      params.set(paramsIn[idx], paramsIn[idx + 1]);
-    }
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    client.request(request);
-
-  }
-}
-
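
For reference, the replica-property operations exercised by the test above map directly to SolrJ helpers. A minimal sketch, assuming a running SolrCloud cluster at the illustrative ZooKeeper address below; the collection, shard and replica names are hypothetical:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:9983").build()) {
      // ADDREPLICAPROP: tag a single replica with an arbitrary property.
      CollectionAdminRequest
          .addReplicaProperty("testcollection", "shard1", "core_node1", "bogus1", "true")
          .process(client);
      // BALANCESHARDUNIQUE: spread a unique-per-slice property such as preferredLeader.
      CollectionAdminRequest.balanceReplicaProperty("testcollection", "preferredLeader")
          .process(client);
    }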

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java
deleted file mode 100644
index a560e75..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.junit.Test;
-
-import java.io.IOException;
-
-public class TestRequestStatusCollectionAPI extends BasicDistributedZkTest {
-
-  public static final int MAX_WAIT_TIMEOUT_SECONDS = 90;
-
-  public TestRequestStatusCollectionAPI() {
-    schemaString = "schema15.xml";      // we need a string id
-  }
-
-  @Test
-  public void test() throws Exception {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
-    params.set("name", "collection2");
-    params.set("numShards", 2);
-    params.set("replicationFactor", 1);
-    params.set("maxShardsPerNode", 100);
-    params.set("collection.configName", "conf1");
-    params.set(CommonAdminParams.ASYNC, "1000");
-    try {
-      sendRequest(params);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    // Check for the request to be completed.
-
-    NamedList r = null;
-    NamedList status = null;
-    String message = null;
-
-    params = new ModifiableSolrParams();
-
-    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
-    params.set(OverseerCollectionMessageHandler.REQUESTID, "1000");
-
-    try {
-      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    assertEquals("found [1000] in completed tasks", message);
-
-    // Check for a random (hopefully non-existent) request id
-    params = new ModifiableSolrParams();
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.REQUESTSTATUS.toString());
-    params.set(OverseerCollectionMessageHandler.REQUESTID, "9999999");
-    try {
-      r = sendRequest(params);
-      status = (NamedList) r.get("status");
-      message = (String) status.get("msg");
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    assertEquals("Did not find [9999999] in any tasks queue", message);
-
-    params = new ModifiableSolrParams();
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.SPLITSHARD.toString());
-    params.set("collection", "collection2");
-    params.set("shard", "shard1");
-    params.set(CommonAdminParams.ASYNC, "1001");
-    try {
-      sendRequest(params);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    // Check for the request to be completed.
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
-    params.set(OverseerCollectionMessageHandler.REQUESTID, "1001");
-    try {
-      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    assertEquals("found [1001] in completed tasks", message);
-
-    params = new ModifiableSolrParams();
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
-    params.set("name", "collection2");
-    params.set("numShards", 2);
-    params.set("replicationFactor", 1);
-    params.set("maxShardsPerNode", 100);
-    params.set("collection.configName", "conf1");
-    params.set(CommonAdminParams.ASYNC, "1002");
-    try {
-      sendRequest(params);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    params = new ModifiableSolrParams();
-
-    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
-    params.set(OverseerCollectionMessageHandler.REQUESTID, "1002");
-
-    try {
-      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-
-    assertEquals("found [1002] in failed tasks", message);
-
-    params = new ModifiableSolrParams();
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
-    params.set("name", "collection3");
-    params.set("numShards", 1);
-    params.set("replicationFactor", 1);
-    params.set("maxShardsPerNode", 100);
-    params.set("collection.configName", "conf1");
-    params.set(CommonAdminParams.ASYNC, "1002");
-    try {
-      r = sendRequest(params);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    assertEquals("Task with the same requestid already exists.", r.get("error"));
-  }
-
-  /**
-   * Helper method to send a status request with specific retry limit and return
-   * the message/null from the success response.
-   */
-  private String sendStatusRequestWithRetry(ModifiableSolrParams params, int maxCounter)
-      throws SolrServerException, IOException{
-    String message = null;
-    while (maxCounter-- > 0) {
-      final NamedList r = sendRequest(params);
-      final NamedList status = (NamedList) r.get("status");
-      final RequestStatusState state = RequestStatusState.fromKey((String) status.get("state"));
-      message = (String) status.get("msg");
-
-      if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
-        return message;
-      }
-
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException e) {
-      }
-
-    }
-    // Return last state?
-    return message;
-  }
-
-  protected NamedList sendRequest(ModifiableSolrParams params) throws SolrServerException, IOException {
-    SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient).getBaseURL();
-    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
-
-    try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 15000)) {
-      return baseServer.request(request);
-    }
-
-  }
-}
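
The async flow driven by this test is also available directly through SolrJ. A minimal sketch, assuming a CloudSolrClient `client` pointed at the cluster and the usual org.apache.solr.client.solrj.request/response imports; the async id "1000" and the collection/config names are illustrative:

    // Queue the create; processAsync returns as soon as the task is accepted.
    CollectionAdminRequest.createCollection("collection2", "conf1", 2, 1)
        .processAsync("1000", client);
    // Poll REQUESTSTATUS until the overseer reports a terminal state.
    RequestStatusState state;
    do {
      Thread.sleep(1000);
      state = CollectionAdminRequest.requestStatus("1000")
          .process(client).getRequestStatus();
    } while (state != RequestStatusState.COMPLETED && state != RequestStatusState.FAILED);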

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
new file mode 100644
index 0000000..058814c
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
@@ -0,0 +1,348 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Random;
+import java.util.TreeMap;
+
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest.ClusterProp;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the logic required to test Solr cloud backup/restore capability.
+ */
+public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  protected static final int NUM_SHARDS = 2;//granted we sometimes shard split to get more
+
+  int replFactor;
+  int numTlogReplicas;
+  int numPullReplicas;
+
+  private static long docsSeed; // see indexDocs()
+
+  @BeforeClass
+  public static void createCluster() throws Exception {
+    docsSeed = random().nextLong();
+  }
+
+  /**
+   * @return The name of the collection to use.
+   */
+  public abstract String getCollectionName();
+
+  /**
+   * @return The name of the backup repository to use.
+   */
+  public abstract String getBackupRepoName();
+
+  /**
+   * @return The absolute path for the backup location.
+   *         Could return null.
+   */
+  public abstract String getBackupLocation();
+
+  @Test
+  public void test() throws Exception {
+    boolean isImplicit = random().nextBoolean();
+    boolean doSplitShardOperation = !isImplicit && random().nextBoolean();
+    replFactor = TestUtil.nextInt(random(), 1, 2);
+    numTlogReplicas = TestUtil.nextInt(random(), 0, 1);
+    numPullReplicas = TestUtil.nextInt(random(), 0, 1);
+    
+    CollectionAdminRequest.Create create = isImplicit ?
+      // NOTE: use shard list with same # of shards as NUM_SHARDS; we assume this later
+      CollectionAdminRequest.createCollectionWithImplicitRouter(getCollectionName(), "conf1", "shard1,shard2", replFactor, numTlogReplicas, numPullReplicas) :
+      CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor, numTlogReplicas, numPullReplicas);
+    
+    if (NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) > cluster.getJettySolrRunners().size() || random().nextBoolean()) {
+      create.setMaxShardsPerNode((int) Math.ceil((double) NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) / cluster.getJettySolrRunners().size())); // just to assert it survives the restoration
+      if (doSplitShardOperation) {
+        create.setMaxShardsPerNode(create.getMaxShardsPerNode() * 2);
+      }
+    }
+    if (random().nextBoolean()) {
+      create.setAutoAddReplicas(true);//just to assert it survives the restoration
+    }
+    Properties coreProps = new Properties();
+    coreProps.put("customKey", "customValue");//just to assert it survives the restoration
+    create.setProperties(coreProps);
+    if (isImplicit) { //implicit router
+      create.setRouterField("shard_s");
+    } else {//composite id router
+      if (random().nextBoolean()) {
+        create.setRouterField("shard_s");
+      }
+    }
+
+    CloudSolrClient solrClient = cluster.getSolrClient();
+    create.process(solrClient);
+
+    indexDocs(getCollectionName());
+
+    if (doSplitShardOperation) {
+      // shard split the first shard
+      int prevActiveSliceCount = getActiveSliceCount(getCollectionName());
+      CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(getCollectionName());
+      splitShard.setShardName("shard1");
+      splitShard.process(solrClient);
+      // wait until we see one more active slice...
+      for (int i = 0; getActiveSliceCount(getCollectionName()) != prevActiveSliceCount + 1; i++) {
+        assertTrue(i < 30);
+        Thread.sleep(500);
+      }
+      // issue a hard commit.  Split shard does a soft commit which isn't good enough for the backup/snapshooter to see
+      solrClient.commit(getCollectionName());
+    }
+
+    testBackupAndRestore(getCollectionName());
+    testConfigBackupOnly("conf1", getCollectionName());
+    testInvalidPath(getCollectionName());
+  }
+
+  /**
+   * This test validates the backup of collection configuration using
+   *  {@linkplain CollectionAdminParams#NO_INDEX_BACKUP_STRATEGY}.
+   *
+   * @param configName The config name for the collection to be backed up.
+   * @param collectionName The name of the collection to be backed up.
+   * @throws Exception in case of errors.
+   */
+  protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
+    // This is deliberately a no-op since we want to run this test only for one of the backup repository
+    // implementations (mainly to avoid redundant test execution). Currently the HDFS backup repository
+    // test implements this.
+  }
+
+  // This test verifies the system behavior when the backup location cluster property is configured with an invalid
+  // value for the specified repository (and the default backup location is not configured in solr.xml).
+  private void testInvalidPath(String collectionName) throws Exception {
+    // Execute this test only if the default backup location is NOT configured in solr.xml
+    if (getBackupLocation() == null) {
+      return;
+    }
+
+    String backupName = "invalidbackuprequest";
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    ClusterProp req = CollectionAdminRequest.setClusterProperty(CoreAdminParams.BACKUP_LOCATION, "/location/does/not/exist");
+    assertEquals(0, req.process(solrClient).getStatus());
+
+    // Do not specify the backup location.
+    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
+        .setRepositoryName(getBackupRepoName());
+    try {
+      backup.process(solrClient);
+      fail("This request should have failed since the cluster property value for backup location property is invalid.");
+    } catch (SolrException ex) {
+      assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
+    }
+
+    String restoreCollectionName = collectionName + "_invalidrequest";
+    CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
+        .setRepositoryName(getBackupRepoName());
+    try {
+      restore.process(solrClient);
+      fail("This request should have failed since the cluster property value for backup location property is invalid.");
+    } catch (SolrException ex) {
+      assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
+    }
+  }
+
+  private int getActiveSliceCount(String collectionName) {
+    return cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(collectionName).getActiveSlices().size();
+  }
+
+  private void indexDocs(String collectionName) throws Exception {
+    Random random = new Random(docsSeed);// use a constant seed for the whole test run so that we can easily re-index.
+    int numDocs = random.nextInt(100);
+    if (numDocs == 0) {
+      log.info("Indexing ZERO test docs");
+      return;
+    }
+    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
+    for (int i=0; i<numDocs; i++) {
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", i);
+      doc.addField("shard_s", "shard" + (1 + random.nextInt(NUM_SHARDS))); // for implicit router
+      docs.add(doc);
+    }
+    CloudSolrClient client = cluster.getSolrClient();
+    client.add(collectionName, docs);// batch
+    client.commit(collectionName);
+  }
+
+  private void testBackupAndRestore(String collectionName) throws Exception {
+    String backupLocation = getBackupLocation();
+    String backupName = "mytestbackup";
+
+    CloudSolrClient client = cluster.getSolrClient();
+    DocCollection backupCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
+
+    Map<String, Integer> origShardToDocCount = getShardToDocCountMap(client, backupCollection);
+    assert origShardToDocCount.isEmpty() == false;
+
+    log.info("Triggering Backup command");
+
+    {
+      CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
+          .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
+      if (random().nextBoolean()) {
+        assertEquals(0, backup.process(client).getStatus());
+      } else {
+        assertEquals(RequestStatusState.COMPLETED, backup.processAndWait(client, 30));//async
+      }
+    }
+
+    log.info("Triggering Restore command");
+
+    String restoreCollectionName = collectionName + "_restored";
+    boolean sameConfig = random().nextBoolean();
+
+    {
+      CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
+          .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
+
+
+      // Explicitly specify the replicationFactor/pullReplicas/nrtReplicas/tlogReplicas.
+      // The value is still the same as the original; maybe test with values different from the original for better coverage.
+      if (random().nextBoolean())  {
+        restore.setReplicationFactor(replFactor);
+      }
+      if (backupCollection.getReplicas().size() > cluster.getJettySolrRunners().size()) {
+        // may need to increase maxShardsPerNode (e.g. if it was shard split, then now we need more)
+        restore.setMaxShardsPerNode((int) Math.ceil((double) backupCollection.getReplicas().size() / cluster.getJettySolrRunners().size()));
+      }
+      
+
+      if (rarely()) { // Try with createNodeSet configuration
+        int nodeSetSize = cluster.getJettySolrRunners().size() / 2;
+        List<String> nodeStrs = new ArrayList<>(nodeSetSize);
+        Iterator<JettySolrRunner> iter = cluster.getJettySolrRunners().iterator();
+        for (int i = 0; i < nodeSetSize ; i++) {
+          nodeStrs.add(iter.next().getNodeName());
+        }
+        restore.setCreateNodeSet(String.join(",", nodeStrs));
+        restore.setCreateNodeSetShuffle(usually());
+        // we need to double maxShardsPerNode value since we reduced number of available nodes by half.
+        if (restore.getMaxShardsPerNode() != null) {
+          restore.setMaxShardsPerNode(restore.getMaxShardsPerNode() * 2);
+        } else {
+          restore.setMaxShardsPerNode(origShardToDocCount.size() * 2);
+        }
+      }
+
+      Properties props = new Properties();
+      props.setProperty("customKey", "customVal");
+      restore.setProperties(props);
+
+      if (sameConfig==false) {
+        restore.setConfigName("customConfigName");
+      }
+      if (random().nextBoolean()) {
+        assertEquals(0, restore.process(client).getStatus());
+      } else {
+        assertEquals(RequestStatusState.COMPLETED, restore.processAndWait(client, 30));//async
+      }
+      AbstractDistribZkTestBase.waitForRecoveriesToFinish(
+          restoreCollectionName, cluster.getSolrClient().getZkStateReader(), log.isDebugEnabled(), true, 30);
+    }
+
+    // Check that the number of results is the same
+    DocCollection restoreCollection = client.getZkStateReader().getClusterState().getCollection(restoreCollectionName);
+    assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
+    //Re-index same docs (should be identical docs given same random seed) and test we have the same result.  Helps
+    //  test we reconstituted the hash ranges / doc router.
+    if (!(restoreCollection.getRouter() instanceof ImplicitDocRouter) && random().nextBoolean()) {
+      indexDocs(restoreCollectionName);
+      assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
+    }
+
+    assertEquals(backupCollection.getReplicationFactor(), restoreCollection.getReplicationFactor());
+    assertEquals(backupCollection.getAutoAddReplicas(), restoreCollection.getAutoAddReplicas());
+    assertEquals(backupCollection.getActiveSlices().iterator().next().getReplicas().size(),
+        restoreCollection.getActiveSlices().iterator().next().getReplicas().size());
+    assertEquals(sameConfig ? "conf1" : "customConfigName",
+        cluster.getSolrClient().getZkStateReader().readConfigName(restoreCollectionName));
+
+    Map<String, Integer> numReplicasByNodeName = new HashMap<>();
+    restoreCollection.getReplicas().forEach(x -> {
+      numReplicasByNodeName.put(x.getNodeName(), numReplicasByNodeName.getOrDefault(x.getNodeName(), 0) + 1);
+    });
+    numReplicasByNodeName.forEach((k, v) -> {
+      assertTrue("Node " + k + " has " + v + " replicas. Expected num replicas : " + restoreCollection.getMaxShardsPerNode() ,
+          v <= restoreCollection.getMaxShardsPerNode());
+    });
+
+    assertEquals("Different count of nrtReplicas. Backup collection state=" + backupCollection + "\nRestore " +
+        "collection state=" + restoreCollection, replFactor, restoreCollection.getNumNrtReplicas().intValue());
+    assertEquals("Different count of pullReplicas. Backup collection state=" + backupCollection + "\nRestore" +
+        " collection state=" + restoreCollection, numPullReplicas, restoreCollection.getNumPullReplicas().intValue());
+    assertEquals("Different count of TlogReplica. Backup collection state=" + backupCollection + "\nRestore" +
+        " collection state=" + restoreCollection, numTlogReplicas, restoreCollection.getNumTlogReplicas().intValue());
+
+    assertEquals("Restore collection should use stateFormat=2", 2, restoreCollection.getStateFormat());
+
+
+    // assert added core properties:
+    // DWS: did via manual inspection.
+    // TODO Find the applicable core.properties on the file system but how?
+  }
+
+  private Map<String, Integer> getShardToDocCountMap(CloudSolrClient client, DocCollection docCollection) throws SolrServerException, IOException {
+    Map<String,Integer> shardToDocCount = new TreeMap<>();
+    for (Slice slice : docCollection.getActiveSlices()) {
+      String shardName = slice.getName();
+      try (HttpSolrClient leaderClient = new HttpSolrClient.Builder(slice.getLeader().getCoreUrl()).withHttpClient(client.getHttpClient()).build()) {
+        long docsInShard = leaderClient.query(new SolrQuery("*:*").setParam("distrib", "false"))
+            .getResults().getNumFound();
+        shardToDocCount.put(shardName, (int) docsInShard);
+      }
+    }
+    return shardToDocCount;
+  }
+}
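
Stripped of the randomized setup, the round trip this class verifies reduces to two Collections API calls. A minimal sketch, with `client` a CloudSolrClient; the repository name, location and collection names below are illustrative:

    // BACKUP: snapshot the index plus collection configuration into the repository.
    CollectionAdminRequest.backupCollection("mycollection", "mytestbackup")
        .setLocation("/backups")
        .setRepositoryName("local")
        .process(client);
    // RESTORE: rebuild the collection under a new name from that snapshot.
    CollectionAdminRequest.restoreCollection("mycollection_restored", "mytestbackup")
        .setLocation("/backups")
        .setRepositoryName("local")
        .process(client);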

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
new file mode 100644
index 0000000..d2b35e4
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.impl.ZkDistribStateManager;
+import org.apache.solr.cloud.ZkTestServer;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class AssignTest extends SolrTestCaseJ4 {
+  
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+
+  }
+  
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+  
+  @Test
+  public void testAssignNode() throws Exception {
+    assumeWorkingMockito();
+    
+    SolrZkClient zkClient = mock(SolrZkClient.class);
+    Map<String, byte[]> zkClientData = new HashMap<>();
+    when(zkClient.setData(anyString(), any(), anyInt(), anyBoolean())).then(invocation -> {
+        zkClientData.put(invocation.getArgument(0), invocation.getArgument(1));
+        return null;
+      }
+    );
+    when(zkClient.getData(anyString(), any(), any(), anyBoolean())).then(invocation ->
+        zkClientData.get(invocation.getArgument(0)));
+    // TODO: fix this to be independent of ZK
+    ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
+    String nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
+    assertEquals("core_node1", nodeName);
+    nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection2", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
+    assertEquals("core_node1", nodeName);
+    nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
+    assertEquals("core_node2", nodeName);
+  }
+
+  @Test
+  public void testIdIsUnique() throws Exception {
+    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
+    ZkTestServer server = new ZkTestServer(zkDir);
+    Object fixedValue = new Object();
+    String[] collections = new String[]{"c1","c2","c3","c4","c5","c6","c7","c8","c9"};
+    Map<String, ConcurrentHashMap<Integer, Object>> collectionUniqueIds = new HashMap<>();
+    for (String c : collections) {
+      collectionUniqueIds.put(c, new ConcurrentHashMap<>());
+    }
+
+    ExecutorService executor = ExecutorUtil.newMDCAwareCachedThreadPool("threadpool");
+    try {
+      server.run();
+
+      try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
+        assertTrue(zkClient.isConnected());
+        zkClient.makePath("/", true);
+        for (String c : collections) {
+          zkClient.makePath("/collections/"+c, true);
+        }
+        // TODO: fix this to be independent of ZK
+        ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
+        List<Future<?>> futures = new ArrayList<>();
+        for (int i = 0; i < 1000; i++) {
+          futures.add(executor.submit(() -> {
+            String collection = collections[random().nextInt(collections.length)];
+            int id = Assign.incAndGetId(stateManager, collection, 0);
+            Object val = collectionUniqueIds.get(collection).put(id, fixedValue);
+            if (val != null) {
+              fail("ZkController do not generate unique id for " + collection);
+            }
+          }));
+        }
+        for (Future<?> future : futures) {
+          future.get();
+        }
+      }
+      assertEquals(1000, (long) collectionUniqueIds.values().stream()
+          .map(ConcurrentHashMap::size)
+          .reduce((m1, m2) -> m1 + m2).get());
+    } finally {
+      server.shutdown();
+      ExecutorUtil.shutdownAndAwaitTermination(executor);
+    }
+  }
+
+
+  @Test
+  public void testBuildCoreName() throws IOException, InterruptedException, KeeperException {
+    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
+    ZkTestServer server = new ZkTestServer(zkDir);
+    server.run();
+    try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
+      zkClient.makePath("/", true);
+      // TODO: fix this to be independent of ZK
+      ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
+      Map<String, Slice> slices = new HashMap<>();
+      slices.put("shard1", new Slice("shard1", new HashMap<>(), null));
+      slices.put("shard2", new Slice("shard2", new HashMap<>(), null));
+
+      DocCollection docCollection = new DocCollection("collection1", slices, null, DocRouter.DEFAULT);
+      assertEquals("Core name pattern changed", "collection1_shard1_replica_n1", Assign.buildSolrCoreName(stateManager, docCollection, "shard1", Replica.Type.NRT));
+      assertEquals("Core name pattern changed", "collection1_shard2_replica_p2", Assign.buildSolrCoreName(stateManager, docCollection, "shard2", Replica.Type.PULL));
+    } finally {
+      server.shutdown();
+    }
+  }
+  
+}
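
The assertions above pin down the core naming convention: <collection>_<shard>_replica_<t><n>, where <t> is the first letter of the replica type (n = NRT, t = TLOG, p = PULL) and <n> comes from a per-collection counter incremented in ZooKeeper (see Assign.incAndGetId in testIdIsUnique). An illustrative call, reusing the stateManager and docCollection set up in the test; the resulting counter value is hypothetical:

    // Hypothetical third assignment, this time for a TLOG replica on shard1:
    String core = Assign.buildSolrCoreName(stateManager, docCollection, "shard1", Replica.Type.TLOG);
    // expected shape: "collection1_shard1_replica_t3"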

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java
new file mode 100644
index 0000000..c084412
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.util.RetryUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Verifies cluster state remains consistent after collection reload.
+ */
+@SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
+public class CollectionReloadTest extends SolrCloudTestCase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(1)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+  }
+  
+  @Test
+  public void testReloadedLeaderStateAfterZkSessionLoss() throws Exception {
+
+    log.info("testReloadedLeaderStateAfterZkSessionLoss initialized OK ... running test logic");
+
+    final String testCollectionName = "c8n_1x1";
+    CollectionAdminRequest.createCollection(testCollectionName, "conf", 1, 1)
+        .process(cluster.getSolrClient());
+
+    Replica leader
+        = cluster.getSolrClient().getZkStateReader().getLeaderRetry(testCollectionName, "shard1", DEFAULT_TIMEOUT);
+
+    long coreStartTime = getCoreStatus(leader).getCoreStartTime().getTime();
+    CollectionAdminRequest.reloadCollection(testCollectionName).process(cluster.getSolrClient());
+
+    RetryUtil.retryUntil("Timed out waiting for core to reload", 30, 1000, TimeUnit.MILLISECONDS, () -> {
+      long restartTime = 0;
+      try {
+        restartTime = getCoreStatus(leader).getCoreStartTime().getTime();
+      } catch (Exception e) {
+        log.warn("Exception getting core start time: {}", e.getMessage());
+        return false;
+      }
+      return restartTime > coreStartTime;
+    });
+
+    final int initialStateVersion = getCollectionState(testCollectionName).getZNodeVersion();
+
+    cluster.expireZkSession(cluster.getReplicaJetty(leader));
+
+    waitForState("Timed out waiting for core to re-register as ACTIVE after session expiry", testCollectionName, (n, c) -> {
+      log.info("Collection state: {}", c.toString());
+      Replica expiredReplica = c.getReplica(leader.getName());
+      return expiredReplica.getState() == Replica.State.ACTIVE && c.getZNodeVersion() > initialStateVersion;
+    });
+
+    log.info("testReloadedLeaderStateAfterZkSessionLoss succeeded ... shutting down now!");
+  }
+}
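
Outside of a test, the reload itself is a one-liner; the rest of this class is machinery for detecting that the reloaded core re-registers correctly after ZK session expiry. A minimal sketch, with `client` a CloudSolrClient and the collection name illustrative:

    // RELOAD: re-open every core of the collection with its current configuration.
    CollectionAdminRequest.reloadCollection("c8n_1x1").process(client);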

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
new file mode 100644
index 0000000..f68fa9e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@Slow
+public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(3)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+  }
+
+  @Before
+  public void deleteCollections() throws Exception {
+    cluster.deleteAllCollections();
+  }
+
+  @Test
+  public void testAddTooManyReplicas() throws Exception {
+    final String collectionName = "TooManyReplicasInSeveralFlavors";
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .setMaxShardsPerNode(1)
+        .process(cluster.getSolrClient());
+
+    // I have two replicas, one for each shard
+
+    // Curiously, I should be able to add a bunch of replicas if I specify the node, even more than maxShardsPerNode
+    // Just get the first node any way we can.
+    // Get a node to use for the "node" parameter.
+    String nodeName = getAllNodeNames(collectionName).get(0);
+
+    // Add a replica using the "node" parameter (no "too many replicas check")
+    // this node should have 2 replicas on it
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .setNode(nodeName)
+        .process(cluster.getSolrClient());
+
+    // Three replicas so far, should be able to create another one "normally"
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .process(cluster.getSolrClient());
+
+    // This one should fail though, no "node" parameter specified
+    Exception e = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+          .process(cluster.getSolrClient());
+    });
+
+    assertTrue("Should have gotten the right error message back",
+          e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+
+    // Oddly, we should succeed next just because setting property.name will not check for nodes being "full up"
+    // TODO: Isn't this a bug?
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .withProperty("name", "bogus2")
+        .setNode(nodeName)
+        .process(cluster.getSolrClient());
+
+    DocCollection collectionState = getCollectionState(collectionName);
+    Slice slice = collectionState.getSlice("shard1");
+    Replica replica = getRandomReplica(slice, r -> r.getCoreName().equals("bogus2"));
+    assertNotNull("Should have found a replica named 'bogus2'", replica);
+    assertEquals("Replica should have been put on correct core", nodeName, replica.getNodeName());
+
+    // Shard1 should have 4 replicas
+    assertEquals("There should be 4 replicas for shard 1", 4, slice.getReplicas().size());
+
+    // And let's fail one more time to ensure that the math doesn't do weird stuff if we have more replicas
+    // than simple calcs would indicate.
+    Exception e2 = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+          .process(cluster.getSolrClient());
+    });
+
+    assertTrue("Should have gotten the right error message back",
+        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+    // wait for recoveries to finish, for a clean shutdown - see SOLR-9645
+    waitForState("Expected to see all replicas active", collectionName, (n, c) -> {
+      for (Replica r : c.getReplicas()) {
+        if (r.getState() != Replica.State.ACTIVE)
+          return false;
+      }
+      return true;
+    });
+  }
+
+  @Test
+  public void testAddShard() throws Exception {
+
+    String collectionName = "TooManyReplicasWhenAddingShards";
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 2)
+        .setMaxShardsPerNode(2)
+        .process(cluster.getSolrClient());
+
+    // We have three nodes and maxShardsPerNode is set to 2. Therefore, we should be able to add 2 shards each with
+    // two replicas, but fail on the third.
+    CollectionAdminRequest.createShard(collectionName, "shard1")
+        .process(cluster.getSolrClient());
+
+    // Now we should have one replica on each Jetty, add another to reach maxShardsPerNode
+    CollectionAdminRequest.createShard(collectionName, "shard2")
+        .process(cluster.getSolrClient());
+
+    // Now fail to add the third as it should exceed maxShardsPerNode
+    Exception e = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.createShard(collectionName, "shard3")
+          .process(cluster.getSolrClient());
+    });
+    assertTrue("Should have gotten the right error message back",
+        e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+    // Hmmm, providing a nodeset also overrides the checks for max replicas, so prove it.
+    List<String> nodes = getAllNodeNames(collectionName);
+
+    CollectionAdminRequest.createShard(collectionName, "shard4")
+        .setNodeSet(StringUtils.join(nodes, ","))
+        .process(cluster.getSolrClient());
+
+    // And just for yucks, ensure we fail the "regular" one again.
+    Exception e2 = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.createShard(collectionName, "shard5")
+          .process(cluster.getSolrClient());
+    });
+    assertTrue("Should have gotten the right error message back",
+        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+    // And finally, ensure that all the replicas we expect are present. We should have shards 1, 2 and 4, and each
+    // should have exactly two replicas
+    waitForState("Expected shards shardstart, 1, 2 and 4, each with two active replicas", collectionName, (n, c) -> {
+      return DocCollection.isFullyActive(n, c, 4, 2);
+    });
+    Map<String, Slice> slices = getCollectionState(collectionName).getSlicesMap();
+    assertEquals("There should be exaclty four slices", slices.size(), 4);
+    assertNotNull("shardstart should exist", slices.get("shardstart"));
+    assertNotNull("shard1 should exist", slices.get("shard1"));
+    assertNotNull("shard2 should exist", slices.get("shard2"));
+    assertNotNull("shard4 should exist", slices.get("shard4"));
+    assertEquals("Shardstart should have exactly 2 replicas", 2, slices.get("shardstart").getReplicas().size());
+    assertEquals("Shard1 should have exactly 2 replicas", 2, slices.get("shard1").getReplicas().size());
+    assertEquals("Shard2 should have exactly 2 replicas", 2, slices.get("shard2").getReplicas().size());
+    assertEquals("Shard4 should have exactly 2 replicas", 2, slices.get("shard4").getReplicas().size());
+
+  }
+
+  @Test
+  public void testDownedShards() throws Exception {
+    String collectionName = "TooManyReplicasWhenAddingDownedNode";
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 1)
+        .setMaxShardsPerNode(2)
+        .process(cluster.getSolrClient());
+
+    // Shut down a Jetty, I really don't care which
+    JettySolrRunner jetty = cluster.getRandomJetty(random());
+    String deadNode = jetty.getBaseUrl().toString();
+    cluster.stopJettySolrRunner(jetty);
+
+    try {
+
+      // Adding a replica on a dead node should fail
+      Exception e1 = expectThrows(Exception.class, () -> {
+        CollectionAdminRequest.addReplicaToShard(collectionName, "shardstart")
+            .setNode(deadNode)
+            .process(cluster.getSolrClient());
+      });
+      assertTrue("Should have gotten a message about shard not currently active: " + e1.toString(),
+          e1.toString().contains("At least one of the node(s) specified [" + deadNode + "] are not currently active in"));
+
+      // Should also die if we just add a shard
+      Exception e2 = expectThrows(Exception.class, () -> {
+        CollectionAdminRequest.createShard(collectionName, "shard1")
+            .setNodeSet(deadNode)
+            .process(cluster.getSolrClient());
+      });
+
+      assertTrue("Should have gotten a message about shard not currently active: " + e2.toString(),
+          e2.toString().contains("At least one of the node(s) specified [" + deadNode + "] are not currently active in"));
+    }
+    finally {
+      cluster.startJettySolrRunner(jetty);
+    }
+  }
+
+  private List<String> getAllNodeNames(String collectionName) throws KeeperException, InterruptedException {
+    DocCollection state = getCollectionState(collectionName);
+    return state.getReplicas().stream().map(Replica::getNodeName).distinct().collect(Collectors.toList());
+  }
+
+}
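
The behavior this test documents, in short: without a "node" parameter the Collections API enforces maxShardsPerNode, while an explicit node (or nodeSet) bypasses the check. A minimal sketch, with `client` a CloudSolrClient; the collection name and node address are illustrative:

    // Rejected once the nodes are full: no node given, so maxShardsPerNode applies.
    CollectionAdminRequest.addReplicaToShard("TooManyReplicasInSeveralFlavors", "shard1")
        .process(client);
    // Accepted: an explicit node skips the check (see the TODO above questioning this).
    CollectionAdminRequest.addReplicaToShard("TooManyReplicasInSeveralFlavors", "shard1")
        .setNode("127.0.0.1:8983_solr")
        .process(client);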


[29/41] lucene-solr:jira/solr-11702: SOLR-11810: Upgrade Jetty to 9.4.8

Posted by da...@apache.org.
SOLR-11810: Upgrade Jetty to 9.4.8


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2900bb59
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2900bb59
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2900bb59

Branch: refs/heads/jira/solr-11702
Commit: 2900bb597db4e312fbfe828a77ba11026866ae86
Parents: 03095ce
Author: Erick Erickson <er...@apache.org>
Authored: Wed Jan 17 11:33:22 2018 -0800
Committer: Erick Erickson <er...@apache.org>
Committed: Wed Jan 17 11:33:22 2018 -0800

----------------------------------------------------------------------
 solr/CHANGES.txt                | 4 ++--
 solr/server/etc/jetty-http.xml  | 1 -
 solr/server/etc/jetty-https.xml | 1 -
 3 files changed, 2 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2900bb59/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 2179602..a6b2415 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -92,8 +92,6 @@ New Features
 * SOLR-11854: multivalued primitive fields can now be sorted by implicitly choosing the min/max
   value for asc/desc sort orders. (hossman)
 
-* SOLR-11810: Upgrade Jetty to 9.4.8.v20171121 (Varun Thacker, Erick Erickson)
-
 * SOLR-11592: Add OpenNLP language detection to the langid contrib. (Koji, Steve Rowe)
 
 Bug Fixes
@@ -156,6 +154,8 @@ Other Changes
 
 * SOLR-11817: Move Collections API classes to it's own package (Varun Thacker)
 
+* SOLR-11810: Upgrade Jetty to 9.4.8.v20171121 (Varun Thacker, Erick Erickson)
+
 ==================  7.2.1 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2900bb59/solr/server/etc/jetty-http.xml
----------------------------------------------------------------------
diff --git a/solr/server/etc/jetty-http.xml b/solr/server/etc/jetty-http.xml
index 6d92830..171a7e6 100644
--- a/solr/server/etc/jetty-http.xml
+++ b/solr/server/etc/jetty-http.xml
@@ -38,7 +38,6 @@
         <Set name="idleTimeout"><Property name="solr.jetty.http.idleTimeout" default="120000"/></Set>
         <Set name="soLingerTime"><Property name="solr.jetty.http.soLingerTime" default="-1"/></Set>
         <Set name="acceptorPriorityDelta"><Property name="solr.jetty.http.acceptorPriorityDelta" default="0"/></Set>
-        <Set name="selectorPriorityDelta"><Property name="solr.jetty.http.selectorPriorityDelta" default="0"/></Set>
         <Set name="acceptQueueSize"><Property name="solr.jetty.http.acceptQueueSize" default="0"/></Set>
       </New>
     </Arg>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2900bb59/solr/server/etc/jetty-https.xml
----------------------------------------------------------------------
diff --git a/solr/server/etc/jetty-https.xml b/solr/server/etc/jetty-https.xml
index d34d4bd..32685ff 100644
--- a/solr/server/etc/jetty-https.xml
+++ b/solr/server/etc/jetty-https.xml
@@ -45,7 +45,6 @@
         <Set name="idleTimeout"><Property name="solr.jetty.https.timeout" default="120000"/></Set>
         <Set name="soLingerTime"><Property name="solr.jetty.https.soLingerTime" default="-1"/></Set>
         <Set name="acceptorPriorityDelta"><Property name="solr.jetty.ssl.acceptorPriorityDelta" default="0"/></Set>
-        <Set name="selectorPriorityDelta"><Property name="solr.jetty.ssl.selectorPriorityDelta" default="0"/></Set>
         <Set name="acceptQueueSize"><Property name="solr.jetty.https.acceptQueueSize" default="0"/></Set>
       </New>
     </Arg>


[09/41] lucene-solr:jira/solr-11702: SOLR-11854: multivalued primitive fields can now be sorted by implicitly choosing the min/max value for asc/desc sort orders

Posted by da...@apache.org.
SOLR-11854: multivalued primitive fields can now be sorted by implicitly choosing the min/max value for asc/desc sort orders


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e2bba98d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e2bba98d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e2bba98d

Branch: refs/heads/jira/solr-11702
Commit: e2bba98dfde0420da84eb740282966ee5624b4d1
Parents: d99799c
Author: Chris Hostetter <ho...@apache.org>
Authored: Tue Jan 16 11:57:44 2018 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Tue Jan 16 11:57:44 2018 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   3 +
 .../apache/solr/schema/AbstractEnumField.java   |  24 +-
 .../org/apache/solr/schema/DatePointField.java  |   6 -
 .../apache/solr/schema/DoublePointField.java    |   6 -
 .../org/apache/solr/schema/EnumFieldType.java   |  11 +
 .../java/org/apache/solr/schema/FieldType.java  |  80 +++-
 .../org/apache/solr/schema/FloatPointField.java |   6 -
 .../org/apache/solr/schema/IntPointField.java   |   6 -
 .../org/apache/solr/schema/LongPointField.java  |   6 -
 .../java/org/apache/solr/schema/NumberType.java |  34 +-
 .../java/org/apache/solr/schema/PointField.java |   6 +
 .../apache/solr/schema/PrimitiveFieldType.java  |   5 +
 .../org/apache/solr/schema/SchemaField.java     |   8 +-
 .../java/org/apache/solr/schema/StrField.java   |  28 ++
 .../java/org/apache/solr/schema/TrieField.java  |  39 +-
 .../solr/collection1/conf/schema11.xml          |  25 ++
 .../org/apache/solr/schema/TestPointFields.java | 164 ++++++--
 .../function/TestMinMaxOnMultiValuedField.java  | 409 ++++++++++++++++++-
 .../src/common-query-parameters.adoc            |  22 +-
 19 files changed, 767 insertions(+), 121 deletions(-)
----------------------------------------------------------------------
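
In practical terms, this change means a request can sort directly on a multivalued
docValues field: an ascending sort implicitly compares each document's minimum
value, a descending sort its maximum. A minimal SolrJ sketch, assuming a
hypothetical multivalued float field "prices_fs" with docValues enabled:

    import org.apache.solr.client.solrj.SolrQuery;

    SolrQuery q = new SolrQuery("*:*");
    // asc implicitly sorts each doc by its smallest prices_fs value;
    // desc would implicitly compare each doc's largest value instead
    q.setSort("prices_fs", SolrQuery.ORDER.asc);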


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 4fd3ff1..45a9a59 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -89,6 +89,9 @@ New Features
 
 * SOLR-11064: Collection APIs should use the disk space hint when using policy framework  (noble)
 
+* SOLR-11854: multivalued primitive fields can now be sorted by implicitly choosing the min/max
+  value for asc/desc sort orders. (hossman)
+
 Bug Fixes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/AbstractEnumField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/AbstractEnumField.java b/solr/core/src/java/org/apache/solr/schema/AbstractEnumField.java
index d4ce268..06f3c32 100644
--- a/solr/core/src/java/org/apache/solr/schema/AbstractEnumField.java
+++ b/solr/core/src/java/org/apache/solr/schema/AbstractEnumField.java
@@ -250,14 +250,32 @@ public abstract class AbstractEnumField extends PrimitiveFieldType {
 
   @Override
   public SortField getSortField(SchemaField field, boolean top) {
-    SortField result = getSortField(field, SortField.Type.INT, top, Integer.MIN_VALUE, Integer.MAX_VALUE);
+    if (field.multiValued()) {
+      MultiValueSelector selector = field.type.getDefaultMultiValueSelectorForSort(field, top);
+      if (null != selector) {
+        final SortField result = getSortedSetSortField(field, selector.getSortedSetSelectorType(),
+                                                       // yes: Strings, it's how SortedSetSortField works
+                                                       top, SortField.STRING_FIRST, SortField.STRING_LAST);
+        if (null == result.getMissingValue()) {
+          // special case 'enum' default behavior: assume missing values are "below" all enum values
+          result.setMissingValue(SortField.STRING_FIRST);
+        }
+        return result;
+      }
+    }
+    
+    // else...
+    // either single valued, or don't support implicit multi selector
+    // (in which case let getSortField() give the error)
+    final SortField result = getSortField(field, SortField.Type.INT, top, Integer.MIN_VALUE, Integer.MAX_VALUE);
+    
     if (null == result.getMissingValue()) {
-      // special case default behavior: assume missing values are "below" all enum values
+      // special case 'enum' default behavior: assume missing values are "below" all enum values
       result.setMissingValue(Integer.MIN_VALUE);
     }
     return result;
   }
-
+  
   @Override
   public ValueSource getValueSource(SchemaField field, QParser qparser) {
     field.checkFieldCacheSource();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/DatePointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/DatePointField.java b/solr/core/src/java/org/apache/solr/schema/DatePointField.java
index 4861917..2bbe4ad 100644
--- a/solr/core/src/java/org/apache/solr/schema/DatePointField.java
+++ b/solr/core/src/java/org/apache/solr/schema/DatePointField.java
@@ -29,7 +29,6 @@ import org.apache.lucene.queries.function.valuesource.LongFieldSource;
 import org.apache.lucene.queries.function.valuesource.MultiValuedLongFieldSource;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedNumericSelector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
@@ -189,11 +188,6 @@ public class DatePointField extends PointField implements DateValueFieldType {
   }
 
   @Override
-  public SortField getSortField(SchemaField field, boolean top) {
-    return getSortField(field, SortField.Type.LONG, top, Long.MIN_VALUE, Long.MAX_VALUE);
-  }
-
-  @Override
   public UninvertingReader.Type getUninversionType(SchemaField sf) {
     if (sf.multiValued()) {
       return null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/DoublePointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/DoublePointField.java b/solr/core/src/java/org/apache/solr/schema/DoublePointField.java
index ba71a8a..3b68ece 100644
--- a/solr/core/src/java/org/apache/solr/schema/DoublePointField.java
+++ b/solr/core/src/java/org/apache/solr/schema/DoublePointField.java
@@ -27,7 +27,6 @@ import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
 import org.apache.lucene.queries.function.valuesource.MultiValuedDoubleFieldSource;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedNumericSelector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
@@ -133,11 +132,6 @@ public class DoublePointField extends PointField implements DoubleValueFieldType
   }
 
   @Override
-  public SortField getSortField(SchemaField field, boolean top) {
-    return getSortField(field, SortField.Type.DOUBLE, top, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY);
-  }
-
-  @Override
   public Type getUninversionType(SchemaField sf) {
     if (sf.multiValued()) {
       return null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/EnumFieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/EnumFieldType.java b/solr/core/src/java/org/apache/solr/schema/EnumFieldType.java
index 4bda823..5b76d48 100644
--- a/solr/core/src/java/org/apache/solr/schema/EnumFieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/EnumFieldType.java
@@ -32,6 +32,7 @@ import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortedNumericSelector;
 import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.search.SortField;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.CharsRef;
@@ -210,4 +211,14 @@ public class EnumFieldType extends AbstractEnumField {
     }
     return new MultiValuedIntFieldSource(field.getName(), selectorType);
   }
+
+  @Override
+  public SortField getSortField(SchemaField field, boolean top) {
+    final SortField result = getNumericSort(field, NumberType.INTEGER, top);
+    if (null == result.getMissingValue()) {
+      // special case 'enum' default behavior: assume missing values are "below" all enum values
+      result.setMissingValue(Integer.MIN_VALUE);
+    }
+    return result;
+  }
 }
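
For enum fields the net effect of the override above, sketched directly against
the Lucene SortField API (field name illustrative, not from this patch):

    import org.apache.lucene.search.SortField;

    SortField sf = new SortField("severity", SortField.Type.INT, /*reverse=*/false);
    if (null == sf.getMissingValue()) {
      // 'enum' default: docs without a value sort below every enum ordinal
      sf.setMissingValue(Integer.MIN_VALUE);
    }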

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/FieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/FieldType.java b/solr/core/src/java/org/apache/solr/schema/FieldType.java
index 31ef6ec..9dcca24 100644
--- a/solr/core/src/java/org/apache/solr/schema/FieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/FieldType.java
@@ -47,10 +47,11 @@ import org.apache.lucene.search.DocValuesRewriteMethod;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.SortedSetSortField;
 import org.apache.lucene.search.SortedNumericSelector;
+import org.apache.lucene.search.SortedNumericSortField;
 import org.apache.lucene.search.SortedSetSelector;
+import org.apache.lucene.search.SortedSetSortField;
+import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.similarities.Similarity;
@@ -662,7 +663,8 @@ public abstract class FieldType extends FieldProperties {
    * Returns the SortField instance that should be used to sort fields
    * of this type.
    * @see SchemaField#checkSortability
-   * @see #getSortField(SchemaField,SortField.Type,boolean,Object,Object)
+   * @see #getStringSort
+   * @see #getNumericSort
    */
   public abstract SortField getSortField(SchemaField field, boolean top);
 
@@ -703,13 +705,26 @@ public abstract class FieldType extends FieldProperties {
                                                    boolean reverse, Object missingLow, Object missingHigh) {
                                                    
     field.checkSortability();
-
     SortField sf = new SortedSetSortField(field.getName(), reverse, selector);
     applySetMissingValue(field, sf, missingLow, missingHigh);
     
     return sf;
   }
   
+  /**
+   * Same as {@link #getSortField} but using {@link SortedNumericSortField}.
+   */
+  protected static SortField getSortedNumericSortField(SchemaField field, SortField.Type sortType,
+                                                       SortedNumericSelector.Type selector,
+                                                       boolean reverse, Object missingLow, Object missingHigh) {
+                                                   
+    field.checkSortability();
+    SortField sf = new SortedNumericSortField(field.getName(), sortType, reverse, selector);
+    applySetMissingValue(field, sf, missingLow, missingHigh);
+    
+    return sf;
+  }
+  
   /** 
    * @see #getSortField 
    * @see #getSortedSetSortField 
@@ -729,11 +744,49 @@ public abstract class FieldType extends FieldProperties {
    * Utility usable by subclasses when they want to get basic String sorting
    * using common checks.
    * @see SchemaField#checkSortability
+   * @see #getSortedSetSortField
+   * @see #getSortField
    */
   protected SortField getStringSort(SchemaField field, boolean reverse) {
+    if (field.multiValued()) {
+      MultiValueSelector selector = field.type.getDefaultMultiValueSelectorForSort(field, reverse);
+      if (null != selector) {
+        return getSortedSetSortField(field, selector.getSortedSetSelectorType(),
+                                     reverse, SortField.STRING_FIRST, SortField.STRING_LAST);
+      }
+    }
+    
+    // else...
+    // either single valued, or don't support implicit multi selector
+    // (in which case let getSortField() give the error)
     return getSortField(field, SortField.Type.STRING, reverse, SortField.STRING_FIRST, SortField.STRING_LAST);
   }
 
+  /**
+   * Utility usable by subclasses when they want to get basic Numeric sorting
+   * using common checks.
+   *
+   * @see SchemaField#checkSortability
+   * @see #getSortedNumericSortField
+   * @see #getSortField
+   */
+  protected SortField getNumericSort(SchemaField field, NumberType type, boolean reverse) {
+    if (field.multiValued()) {
+      MultiValueSelector selector = field.type.getDefaultMultiValueSelectorForSort(field, reverse);
+      if (null != selector) {
+        return getSortedNumericSortField(field, type.sortType, selector.getSortedNumericSelectorType(),
+                                         reverse, type.sortMissingLow, type.sortMissingHigh);
+      }
+    }
+    
+    // else...
+    // either single valued, or don't support implicit multi selector
+    // (in which case let getSortField() give the error)
+    return getSortField(field, type.sortType, reverse, type.sortMissingLow, type.sortMissingHigh);
+  }
+
+  
+
   /** called to get the default value source (normally, from the
    *  Lucene FieldCache.)
    */
@@ -760,8 +813,23 @@ public abstract class FieldType extends FieldProperties {
     
     throw new SolrException(ErrorCode.BAD_REQUEST, "Selecting a single value from a multivalued field is not supported for this field: " + field.getName() + " (type: " + this.getTypeName() + ")");
   }
-
-
+  
+  /**
+   * Method for indicating which {@link MultiValueSelector} (if any) should be used when
+   * sorting on a multivalued field of this type for the specified direction (asc/desc).  
+   * The default implementation returns <code>null</code> (for all inputs).
+   *
+   * @param field The SchemaField (of this type) in question
+   * @param reverse false if this is an ascending sort, true if this is a descending sort.
+   * @return the implicit selector to use for this direction, or null if implicit sorting on the specified direction is not supported and should return an error.
+   * @see MultiValueSelector
+   */
+  public MultiValueSelector getDefaultMultiValueSelectorForSort(SchemaField field, boolean reverse) {
+    // trivial base case
+    return null;
+  }
+  
+  
   
   /**
    * Returns a Query instance for doing range searches on this field type. {@link org.apache.solr.search.SolrQueryParser}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/FloatPointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/FloatPointField.java b/solr/core/src/java/org/apache/solr/schema/FloatPointField.java
index f69a1db..68155f4 100644
--- a/solr/core/src/java/org/apache/solr/schema/FloatPointField.java
+++ b/solr/core/src/java/org/apache/solr/schema/FloatPointField.java
@@ -27,7 +27,6 @@ import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
 import org.apache.lucene.queries.function.valuesource.MultiValuedFloatFieldSource;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedNumericSelector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
@@ -133,11 +132,6 @@ public class FloatPointField extends PointField implements FloatValueFieldType {
   }
 
   @Override
-  public SortField getSortField(SchemaField field, boolean top) {
-    return getSortField(field, SortField.Type.FLOAT, top, Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY);
-  }
-
-  @Override
   public Type getUninversionType(SchemaField sf) {
     if (sf.multiValued()) {
       return null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/IntPointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/IntPointField.java b/solr/core/src/java/org/apache/solr/schema/IntPointField.java
index b179c57..a43639c 100644
--- a/solr/core/src/java/org/apache/solr/schema/IntPointField.java
+++ b/solr/core/src/java/org/apache/solr/schema/IntPointField.java
@@ -27,7 +27,6 @@ import org.apache.lucene.queries.function.valuesource.IntFieldSource;
 import org.apache.lucene.queries.function.valuesource.MultiValuedIntFieldSource;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedNumericSelector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
@@ -131,11 +130,6 @@ public class IntPointField extends PointField implements IntValueFieldType {
   }
 
   @Override
-  public SortField getSortField(SchemaField field, boolean top) {
-    return getSortField(field, SortField.Type.INT, top, Integer.MIN_VALUE, Integer.MAX_VALUE);
-  }
-
-  @Override
   public Type getUninversionType(SchemaField sf) {
     if (sf.multiValued()) {
       return null; 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/LongPointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/LongPointField.java b/solr/core/src/java/org/apache/solr/schema/LongPointField.java
index 547725b..d5a5072 100644
--- a/solr/core/src/java/org/apache/solr/schema/LongPointField.java
+++ b/solr/core/src/java/org/apache/solr/schema/LongPointField.java
@@ -27,7 +27,6 @@ import org.apache.lucene.queries.function.valuesource.LongFieldSource;
 import org.apache.lucene.queries.function.valuesource.MultiValuedLongFieldSource;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.SortField;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.solr.search.QParser;
@@ -130,11 +129,6 @@ public class LongPointField extends PointField implements LongValueFieldType {
   }
 
   @Override
-  public SortField getSortField(SchemaField field, boolean top) {
-    return getSortField(field, SortField.Type.LONG, top, Long.MIN_VALUE, Long.MAX_VALUE);
-  }
-
-  @Override
   public Type getUninversionType(SchemaField sf) {
     if (sf.multiValued()) {
       return null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/NumberType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/NumberType.java b/solr/core/src/java/org/apache/solr/schema/NumberType.java
index 2253d67..8f41b6c 100644
--- a/solr/core/src/java/org/apache/solr/schema/NumberType.java
+++ b/solr/core/src/java/org/apache/solr/schema/NumberType.java
@@ -16,10 +16,34 @@
  */
 package org.apache.solr.schema;
 
+import org.apache.lucene.search.SortField;
+
 public enum NumberType {
-  INTEGER,
-  LONG,
-  FLOAT,
-  DOUBLE,
-  DATE
+  INTEGER(SortField.Type.INT, Integer.MIN_VALUE, Integer.MAX_VALUE),
+  LONG(SortField.Type.LONG, Long.MIN_VALUE, Long.MAX_VALUE),
+  FLOAT(SortField.Type.FLOAT, Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY),
+  DOUBLE(SortField.Type.DOUBLE, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY),
+  DATE(SortField.Type.LONG, Long.MIN_VALUE, Long.MAX_VALUE);
+
+  /** The SortField type that corresponds with this NumberType */
+  public final SortField.Type sortType;
+  /** 
+   * The effective value to use when sorting on this field should result in docs w/o a value 
+   * sorting "low" (which may be "first" or "last" depending on sort direction) 
+   * @see SortField#setMissingValue
+   */
+  public final Object sortMissingLow;
+  /** 
+   * The effective value to use when sorting on this field should result in docs w/o a value 
+   * sorting "high" (which may be "first" or "last" depending on sort direction) 
+   * @see SortField#setMissingValue
+   */
+  public final Object sortMissingHigh;
+  
+  private NumberType(SortField.Type sortType, Object sortMissingLow, Object sortMissingHigh) {
+    this.sortType = sortType;
+    this.sortMissingLow = sortMissingLow;
+    this.sortMissingHigh = sortMissingHigh;
+
+  }
 }
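
A sketch of how the enriched enum is consumed, mirroring the
getSortedNumericSortField helper added to FieldType above (field name
illustrative):

    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.SortedNumericSelector;
    import org.apache.lucene.search.SortedNumericSortField;

    NumberType t = NumberType.FLOAT;
    SortField sf = new SortedNumericSortField("val_fs_p", t.sortType,
                                              /*reverse=*/false,
                                              SortedNumericSelector.Type.MIN);
    // sortMissingHigh on an ascending sort pushes docs without a value last
    sf.setMissingValue(t.sortMissingHigh);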

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/PointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/PointField.java b/solr/core/src/java/org/apache/solr/schema/PointField.java
index 09d0175..91a342c 100644
--- a/solr/core/src/java/org/apache/solr/schema/PointField.java
+++ b/solr/core/src/java/org/apache/solr/schema/PointField.java
@@ -33,6 +33,7 @@ import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.IndexOrDocValuesQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortedNumericSelector;
+import org.apache.lucene.search.SortField;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.CharsRef;
@@ -294,4 +295,9 @@ public abstract class PointField extends NumericFieldType {
 
   protected abstract StoredField getStoredField(SchemaField sf, Object value);
 
+  @Override
+  public SortField getSortField(SchemaField field, boolean top) {
+    return getNumericSort(field, getNumberType(), top);
+  }
+  
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/PrimitiveFieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/PrimitiveFieldType.java b/solr/core/src/java/org/apache/solr/schema/PrimitiveFieldType.java
index 9d9da47..0c0042e 100644
--- a/solr/core/src/java/org/apache/solr/schema/PrimitiveFieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/PrimitiveFieldType.java
@@ -37,4 +37,9 @@ public abstract class PrimitiveFieldType extends FieldType {
   @Override
   protected void checkSupportsDocValues() { // primitive types support DocValues
   }
+
+  @Override
+  public MultiValueSelector getDefaultMultiValueSelectorForSort(SchemaField field, boolean reverse) {
+    return reverse ? MultiValueSelector.MAX : MultiValueSelector.MIN;
+  }
 }
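
With this default in place, a primitive numeric type only needs to route its
getSortField through the new helper; a sketch of a hypothetical long-based
subclass (PointField above does exactly this via getNumberType()):

    @Override
    public SortField getSortField(SchemaField field, boolean top) {
      // getNumericSort handles single-valued sorting and, for multivalued
      // fields, the implicit min/max selector chosen above
      return getNumericSort(field, NumberType.LONG, top);
    }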

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/SchemaField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/SchemaField.java b/solr/core/src/java/org/apache/solr/schema/SchemaField.java
index c2e8cca..256cbae 100644
--- a/solr/core/src/java/org/apache/solr/schema/SchemaField.java
+++ b/solr/core/src/java/org/apache/solr/schema/SchemaField.java
@@ -161,10 +161,14 @@ public final class SchemaField extends FieldProperties implements IndexableField
    * @see FieldType#getSortField
    */
   public void checkSortability() throws SolrException {
-    if ( multiValued() ) {
+    if ( multiValued()
+         // if either of these are non-null, then we should not error
+         && null == this.type.getDefaultMultiValueSelectorForSort(this,true)
+         && null == this.type.getDefaultMultiValueSelectorForSort(this,false) ) {
+      
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, 
                               "can not sort on multivalued field: " 
-                              + getName());
+                              + getName() + " of type: " + this.type.getTypeName());
     }
     if (! hasDocValues() ) {
       if ( ! ( indexed() && null != this.type.getUninversionType(this) ) ) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/StrField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/StrField.java b/solr/core/src/java/org/apache/solr/schema/StrField.java
index 3294b04..d9b51d1 100644
--- a/solr/core/src/java/org/apache/solr/schema/StrField.java
+++ b/solr/core/src/java/org/apache/solr/schema/StrField.java
@@ -26,8 +26,11 @@ import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.SortedSetSelector;
 import org.apache.lucene.util.BytesRef;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.response.TextResponseWriter;
 import org.apache.solr.search.QParser;
 import org.apache.solr.uninverting.UninvertingReader.Type;
@@ -104,6 +107,31 @@ public class StrField extends PrimitiveFieldType {
   public Object unmarshalSortValue(Object value) {
     return unmarshalStringSortValue(value);
   }
+
+  @Override
+  public ValueSource getSingleValueSource(MultiValueSelector choice, SchemaField field, QParser parser) {
+    // trivial base case
+    if (!field.multiValued()) {
+      // single value matches any selector
+      return getValueSource(field, parser);
+    }
+    
+    // See LUCENE-6709
+    if (! field.hasDocValues()) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                              "docValues='true' is required to select '" + choice.toString() +
+                              "' value from multivalued field ("+ field.getName() +") at query time");
+    }
+    SortedSetSelector.Type selectorType = choice.getSortedSetSelectorType();
+    if (null == selectorType) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                              choice.toString() + " is not a supported option for picking a single value"
+                              + " from the multivalued field: " + field.getName() +
+                              " (type: " + this.getTypeName() + ")");
+    }
+    
+    return new SortedSetFieldSource(field.getName(), selectorType);
+  }
 }
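
With this method in place, field(...,min|max) works on multivalued strings at
query time, provided the field has docValues. A SolrJ sketch against the
val_strs_dv test field added to schema11.xml below:

    import org.apache.solr.client.solrj.SolrQuery;

    SolrQuery q = new SolrQuery("id:1");
    q.setFields("id",
                "min_str:field(val_strs_dv,min)",   // lexicographically smallest value
                "max_str:field(val_strs_dv,max)");  // lexicographically largest value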
 
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/java/org/apache/solr/schema/TrieField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieField.java b/solr/core/src/java/org/apache/solr/schema/TrieField.java
index ebe2103..90b27e4 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieField.java
@@ -160,30 +160,25 @@ public class TrieField extends NumericFieldType {
   }
 
   @Override
-  public SortField getSortField(SchemaField field, boolean top) {
-    field.checkSortability();
-
-    Object missingValue = null;
-    boolean sortMissingLast  = field.sortMissingLast();
-    boolean sortMissingFirst = field.sortMissingFirst();
-
-    SortField sf;
-
-    switch (type) {
-      case INTEGER:
-        return getSortField(field, SortField.Type.INT, top, Integer.MIN_VALUE, Integer.MAX_VALUE);
-      case FLOAT:
-        return getSortField(field, SortField.Type.FLOAT, top, Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY);
-      case DATE: // fallthrough
-      case LONG:
-        return getSortField(field, SortField.Type.LONG, top, Long.MIN_VALUE, Long.MAX_VALUE);
-      case DOUBLE:
-        return getSortField(field, SortField.Type.DOUBLE, top, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY);
-      default:
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + field.name);
+  public SortField getSortField(SchemaField field, boolean reverse) {
+    // NOTE: can't use getNumericSort because our multivalued case is special: we use SortedSet
+
+    if (field.multiValued()) {
+      MultiValueSelector selector = field.type.getDefaultMultiValueSelectorForSort(field, reverse);
+      if (null != selector) {
+        return getSortedSetSortField(field, selector.getSortedSetSelectorType(),
+                                     // yes: we really want Strings here, regardless of NumberType
+                                     reverse, SortField.STRING_FIRST, SortField.STRING_LAST);
+      }
     }
+    
+    // else...
+    // either single valued, or don't support implicit multi selector
+    // (in which case let getSortField() give the error)
+    NumberType type = getNumberType();
+    return getSortField(field, type.sortType, reverse, type.sortMissingLow, type.sortMissingHigh);
   }
-  
+
   @Override
   public Type getUninversionType(SchemaField sf) {
     if (sf.multiValued()) {

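
A sketch of the SortField the multivalued Trie branch above produces; note the
selection happens over the SortedSet (string) doc values rather than a numeric
view (field name illustrative):

    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.SortedSetSelector;
    import org.apache.lucene.search.SortedSetSortField;

    SortField sf = new SortedSetSortField("val_tls_dv", /*reverse=*/false,
                                          SortedSetSelector.Type.MIN);
    // sortMissingLow: on an ascending sort, docs without a value come first
    sf.setMissingValue(SortField.STRING_FIRST);
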
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/test-files/solr/collection1/conf/schema11.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema11.xml b/solr/core/src/test-files/solr/collection1/conf/schema11.xml
index 25b7e22..d09e209 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema11.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema11.xml
@@ -339,10 +339,35 @@ valued. -->
    <field name="cat_floatDocValues" type="float"  indexed="true" stored="true" docValues="true" multiValued="true" />
    <field name="cat_length" type="text_length" indexed="true" stored="true" multiValued="true"/>
 
+   <!-- see TestMinMaxOnMultiValuedField -->
+   <!-- NOTE: "string" type configured with sortMissingLast="true" 
+        we need a multivalued string for sort testing using sortMissing*="false"
+   -->
+   <field name="val_strs_dv" type="string" indexed="true" stored="true"
+          docValues="true" multiValued="true" sortMissingFirst="false" sortMissingLast="false" />
+   <!-- specific multivalued fields of each type with sortMissing First/Last -->
+   <field name="val_str_missf_s_dv" type="string" docValues="true" multiValued="true" sortMissingFirst="true" sortMissingLast="false" />
+   <field name="val_str_missl_s_dv" type="string" docValues="true" multiValued="true" sortMissingFirst="false" sortMissingLast="true" />
+   <field name="val_int_missf_s_dv" type="int" docValues="true" multiValued="true" sortMissingFirst="true" sortMissingLast="false" />
+   <field name="val_int_missl_s_dv" type="int" docValues="true" multiValued="true" sortMissingFirst="false" sortMissingLast="true" />
+   <field name="val_long_missf_s_dv" type="long" docValues="true" multiValued="true" sortMissingFirst="true" sortMissingLast="false" />
+   <field name="val_long_missl_s_dv" type="long" docValues="true" multiValued="true" sortMissingFirst="false" sortMissingLast="true" />
+   <field name="val_float_missf_s_dv" type="float" docValues="true" multiValued="true" sortMissingFirst="true" sortMissingLast="false" />
+   <field name="val_float_missl_s_dv" type="float" docValues="true" multiValued="true" sortMissingFirst="false" sortMissingLast="true" />
+   <field name="val_double_missf_s_dv" type="double" docValues="true" multiValued="true" sortMissingFirst="true" sortMissingLast="false" />
+   <field name="val_double_missl_s_dv" type="double" docValues="true" multiValued="true" sortMissingFirst="false" sortMissingLast="true" />
+   <field name="val_date_missf_s_dv" type="date" docValues="true" multiValued="true" sortMissingFirst="true" sortMissingLast="false" />
+   <field name="val_date_missl_s_dv" type="date" docValues="true" multiValued="true" sortMissingFirst="false" sortMissingLast="true" />
+   <field name="val_bool_missf_s_dv" type="boolean" docValues="true" multiValued="true" sortMissingFirst="true" sortMissingLast="false" />
+   <field name="val_bool_missl_s_dv" type="boolean" docValues="true" multiValued="true" sortMissingFirst="false" sortMissingLast="true" />
+   <field name="val_enum_missf_s_dv" type="severityType" docValues="true" multiValued="true" sortMissingFirst="true" sortMissingLast="false" />
+   <field name="val_enum_missl_s_dv" type="severityType" docValues="true" multiValued="true" sortMissingFirst="false" sortMissingLast="true" />
+   
 
    <!-- Enum type -->
    <field name="severity" type="severityType" docValues="true" indexed="true" stored="true" multiValued="false"/>
 
+   
    <!-- Dynamic field definitions.  If a field name is not found, dynamicFields
         will be used if the name matches any of the patterns.
         RESTRICTION: the glob-like pattern in the name attribute must have

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/schema/TestPointFields.java b/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
index af5b022..d5d2e1f 100644
--- a/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
+++ b/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
@@ -167,7 +167,8 @@ public class TestPointFields extends SolrTestCaseJ4 {
       doTestPointFieldSort(field, randomIntsMissing);
       doTestIntPointFunctionQuery(field);
     }
-    
+
+    // no docvalues
     for (String r : Arrays.asList("*_p_i_ni", "*_p_i_ni_ns")) {
       assertTrue(r, regexToTest.remove(r));
       String field = r.replace("*", "number");
@@ -175,18 +176,38 @@ public class TestPointFields extends SolrTestCaseJ4 {
       doTestPointFieldFunctionQueryError(field, "w/o docValues", toStringArray(getRandomInts(1, false)));
     }
     
-    for (String r : Arrays.asList("*_p_i_mv", "*_p_i_ni_mv", "*_p_i_ni_mv_dv", "*_p_i_ni_dv_ns_mv",
-                                  "*_p_i_ni_ns_mv", "*_p_i_dv_ns_mv", "*_p_i_mv_dv",
-                                  "*_p_i_mv_smf", "*_p_i_mv_dv_smf", "*_p_i_ni_mv_dv_smf",
-                                  "*_p_i_mv_sml", "*_p_i_mv_dv_sml", "*_p_i_ni_mv_dv_sml")) {
+    // multivalued, no docvalues
+    for (String r : Arrays.asList("*_p_i_mv", "*_p_i_ni_mv", "*_p_i_ni_ns_mv", 
+                                  "*_p_i_mv_smf", "*_p_i_mv_sml")) {
+           
+      assertTrue(r, regexToTest.remove(r));
+      String field = r.replace("*", "number");
+      doTestPointFieldSortError(field, "w/o docValues", toStringArray(getRandomInts(1, false)));
+      int numValues = 2 * RANDOM_MULTIPLIER;
+      doTestPointFieldSortError(field, "w/o docValues", toStringArray(getRandomInts(numValues, false)));
+      doTestPointFieldFunctionQueryError(field, "multivalued", toStringArray(getRandomInts(1, false)));
+      doTestPointFieldFunctionQueryError(field, "multivalued", toStringArray(getRandomInts(numValues, false)));
+    }
+
+    // multivalued, w/ docValues
+    for (String r : Arrays.asList("*_p_i_ni_mv_dv", "*_p_i_ni_dv_ns_mv",
+                                  "*_p_i_dv_ns_mv", "*_p_i_mv_dv",
+                                  "*_p_i_mv_dv_smf", "*_p_i_ni_mv_dv_smf",
+                                  "*_p_i_mv_dv_sml", "*_p_i_ni_mv_dv_sml"
+                                  )) {
       assertTrue(r, regexToTest.remove(r));
       String field = r.replace("*", "number");
-      doTestPointFieldSortError(field, "multivalued", toStringArray(getRandomInts(1, false)));
+
+      // NOTE: only testing one value per doc here, but TestMinMaxOnMultiValuedField
+      // covers this in more depth
+      doTestPointFieldSort(field, sequential);
+      doTestPointFieldSort(field, randomInts);
+
+      // value source (w/o field(...,min|max)) usage should still error...
       int numValues = 2 * RANDOM_MULTIPLIER;
-      doTestPointFieldSortError(field, "multivalued", toStringArray(getRandomInts(numValues, false)));
       doTestPointFieldFunctionQueryError(field, "multivalued", toStringArray(getRandomInts(1, false)));
       doTestPointFieldFunctionQueryError(field, "multivalued", toStringArray(getRandomInts(numValues, false)));
-   }
+    }
     
     assertEquals("Missing types in the test", Collections.<String>emptySet(), regexToTest);
   }
@@ -577,18 +598,35 @@ public class TestPointFields extends SolrTestCaseJ4 {
       doTestPointFieldFunctionQueryError(field, "w/o docValues", "42.34");
     }
     
-    for (String r : Arrays.asList("*_p_d_mv", "*_p_d_ni_mv", "*_p_d_ni_mv_dv", "*_p_d_ni_dv_ns_mv",
-                                  "*_p_d_ni_ns_mv", "*_p_d_dv_ns_mv", "*_p_d_mv_dv",
-                                  "*_p_d_mv_smf", "*_p_d_mv_dv_smf", "*_p_d_ni_mv_dv_smf",
-                                  "*_p_d_mv_sml", "*_p_d_mv_dv_sml", "*_p_d_ni_mv_dv_sml")) {
+    // multivalued, no docvalues
+    for (String r : Arrays.asList("*_p_d_mv", "*_p_d_ni_mv", "*_p_d_ni_ns_mv", 
+                                  "*_p_d_mv_smf", "*_p_d_mv_sml")) {
+                                  
       assertTrue(r, regexToTest.remove(r));
       String field = r.replace("*", "number");
-      doTestPointFieldSortError(field, "multivalued", "42.34");
-      doTestPointFieldSortError(field, "multivalued", "42.34", "66.6");
+      doTestPointFieldSortError(field, "w/o docValues", "42.34");
+      doTestPointFieldSortError(field, "w/o docValues", "42.34", "66.6");
       doTestPointFieldFunctionQueryError(field, "multivalued", "42.34");
       doTestPointFieldFunctionQueryError(field, "multivalued", "42.34", "66.6");
     }
     
+    // multivalued, w/ docValues
+    for (String r : Arrays.asList("*_p_d_ni_mv_dv", "*_p_d_ni_dv_ns_mv",
+                                  "*_p_d_dv_ns_mv", "*_p_d_mv_dv",
+                                  "*_p_d_mv_dv_smf", "*_p_d_ni_mv_dv_smf",
+                                  "*_p_d_mv_dv_sml", "*_p_d_ni_mv_dv_sml")) {
+      assertTrue(r, regexToTest.remove(r));
+      String field = r.replace("*", "number");
+      
+      // NOTE: only testing one value per doc here, but TestMinMaxOnMultiValuedField
+      // covers this in more depth
+      doTestPointFieldSort(field, sequential);
+      doTestPointFieldSort(field, randomDoubles);
+      
+      // value source (w/o field(...,min|max)) usage should still error...
+      doTestPointFieldFunctionQueryError(field, "multivalued", "42.34");
+      doTestPointFieldFunctionQueryError(field, "multivalued", "42.34", "66.6");
+    }
     assertEquals("Missing types in the test", Collections.<String>emptySet(), regexToTest);
   }
   
@@ -983,18 +1021,36 @@ public class TestPointFields extends SolrTestCaseJ4 {
       doTestPointFieldFunctionQueryError(field, "w/o docValues", "42.34");
     }
     
-    for (String r : Arrays.asList("*_p_f_mv", "*_p_f_ni_mv", "*_p_f_ni_mv_dv", "*_p_f_ni_dv_ns_mv",
-                                  "*_p_f_ni_ns_mv", "*_p_f_dv_ns_mv", "*_p_f_mv_dv",  
-                                  "*_p_f_mv_smf", "*_p_f_mv_dv_smf", "*_p_f_ni_mv_dv_smf",
-                                  "*_p_f_mv_sml", "*_p_f_mv_dv_sml", "*_p_f_ni_mv_dv_sml")) {
+    // multivalued, no docvalues
+    for (String r : Arrays.asList("*_p_f_mv", "*_p_f_ni_mv", "*_p_f_ni_ns_mv", 
+                                  "*_p_f_mv_smf", "*_p_f_mv_sml")) {
+                                  
       assertTrue(r, regexToTest.remove(r));
       String field = r.replace("*", "number");
-      doTestPointFieldSortError(field, "multivalued", "42.34");
-      doTestPointFieldSortError(field, "multivalued", "42.34", "66.6");
+      doTestPointFieldSortError(field, "w/o docValues", "42.34");
+      doTestPointFieldSortError(field, "w/o docValues", "42.34", "66.6");
       doTestPointFieldFunctionQueryError(field, "multivalued", "42.34");
       doTestPointFieldFunctionQueryError(field, "multivalued", "42.34", "66.6");
     }
-    
+
+    // multivalued, w/ docValues
+    for (String r : Arrays.asList("*_p_f_ni_mv_dv", "*_p_f_ni_dv_ns_mv",
+                                  "*_p_f_dv_ns_mv", "*_p_f_mv_dv",  
+                                  "*_p_f_mv_dv_smf", "*_p_f_ni_mv_dv_smf",
+                                  "*_p_f_mv_dv_sml", "*_p_f_ni_mv_dv_sml")) {
+      assertTrue(r, regexToTest.remove(r));
+      String field = r.replace("*", "number");
+
+      // NOTE: only testing one value per doc here, but TestMinMaxOnMultiValuedField
+      // covers this in more depth
+      doTestPointFieldSort(field, sequential);
+      doTestPointFieldSort(field, randomFloats);
+      
+      // value source (w/o field(...,min|max)) usage should still error...
+      doTestPointFieldFunctionQueryError(field, "multivalued", "42.34");
+      doTestPointFieldFunctionQueryError(field, "multivalued", "42.34", "66.6");
+     
+    }    
     assertEquals("Missing types in the test", Collections.<String>emptySet(), regexToTest);
   }
   
@@ -1329,7 +1385,8 @@ public class TestPointFields extends SolrTestCaseJ4 {
       doTestPointFieldSort(field, randomLongsMissing);
       doTestLongPointFunctionQuery(field);
     }
-
+    
+    // no docvalues
     for (String r : Arrays.asList("*_p_l_ni", "*_p_l_ni_ns")) {
       assertTrue(r, regexToTest.remove(r));
       String field = r.replace("*", "number");
@@ -1337,19 +1394,37 @@ public class TestPointFields extends SolrTestCaseJ4 {
       doTestPointFieldFunctionQueryError(field, "w/o docValues", toStringArray(getRandomLongs(1, false)));
     }
     
-    for (String r : Arrays.asList("*_p_l_mv", "*_p_l_ni_mv", "*_p_l_ni_mv_dv", "*_p_l_ni_dv_ns_mv",
-                                  "*_p_l_ni_ns_mv", "*_p_l_dv_ns_mv", "*_p_l_mv_dv",
-                                  "*_p_l_mv_smf", "*_p_l_mv_dv_smf", "*_p_l_ni_mv_dv_smf",
-                                  "*_p_l_mv_sml", "*_p_l_mv_dv_sml", "*_p_l_ni_mv_dv_sml")) {
+    // multivalued, no docvalues
+    for (String r : Arrays.asList("*_p_l_mv", "*_p_l_ni_mv", "*_p_l_ni_ns_mv", 
+                                  "*_p_l_mv_smf", "*_p_l_mv_sml")) {
+                                  
       assertTrue(r, regexToTest.remove(r));
       String field = r.replace("*", "number");
-      doTestPointFieldSortError(field, "multivalued", toStringArray(getRandomLongs(1, false)));
+      doTestPointFieldSortError(field, "w/o docValues", toStringArray(getRandomLongs(1, false)));
+      int numValues = 2 * RANDOM_MULTIPLIER;
+      doTestPointFieldSortError(field, "w/o docValues", toStringArray(getRandomLongs(numValues, false)));
+      doTestPointFieldFunctionQueryError(field, "multivalued", toStringArray(getRandomLongs(1, false)));
+      doTestPointFieldFunctionQueryError(field, "multivalued", toStringArray(getRandomLongs(numValues, false)));
+    }
+    // multivalued, w/ docValues
+    for (String r : Arrays.asList("*_p_l_ni_mv_dv", "*_p_l_ni_dv_ns_mv",
+                                  "*_p_l_dv_ns_mv", "*_p_l_mv_dv",
+                                  "*_p_l_mv_dv_smf", "*_p_l_ni_mv_dv_smf",
+                                  "*_p_l_mv_dv_sml", "*_p_l_ni_mv_dv_sml")) {
+
+      assertTrue(r, regexToTest.remove(r));
+      String field = r.replace("*", "number");
+
+      // NOTE: only testing one value per doc here, but TestMinMaxOnMultiValuedField
+      // covers this in more depth
+      doTestPointFieldSort(field, vals);
+      doTestPointFieldSort(field, randomLongs);
+
+      // value source (w/o field(...,min|max)) usage should still error...
       int numValues = 2 * RANDOM_MULTIPLIER;
-      doTestPointFieldSortError(field, "multivalued", toStringArray(getRandomLongs(numValues, false)));
       doTestPointFieldFunctionQueryError(field, "multivalued", toStringArray(getRandomLongs(1, false)));
       doTestPointFieldFunctionQueryError(field, "multivalued", toStringArray(getRandomLongs(numValues, false)));
     }
-    
     assertEquals("Missing types in the test", Collections.<String>emptySet(), regexToTest);
   }
   
@@ -1679,19 +1754,36 @@ public class TestPointFields extends SolrTestCaseJ4 {
       doTestPointFieldFunctionQueryError(field, "w/o docValues", "1995-12-31T23:59:59Z");
     }
     
-    for (String r : Arrays.asList("*_p_dt_mv", "*_p_dt_ni_mv", "*_p_dt_ni_mv_dv", "*_p_dt_ni_dv_ns_mv",
-                                  "*_p_dt_ni_ns_mv", "*_p_dt_dv_ns_mv", "*_p_dt_mv_dv",
-                                  "*_p_dt_mv_smf", "*_p_dt_mv_dv_smf", "*_p_dt_ni_mv_dv_smf",
-                                  "*_p_dt_mv_sml", "*_p_dt_mv_dv_sml", "*_p_dt_ni_mv_dv_sml")) {
+    // multivalued, no docvalues
+    for (String r : Arrays.asList("*_p_dt_mv", "*_p_dt_ni_mv", "*_p_dt_ni_ns_mv", 
+                                  "*_p_dt_mv_smf", "*_p_dt_mv_sml")) {
+                                  
       assertTrue(r, regexToTest.remove(r));
       String field = r.replace("*", "number");
-      doTestPointFieldSortError(field, "multivalued", "1995-12-31T23:59:59Z");
-      doTestPointFieldSortError(field, "multivalued", "1995-12-31T23:59:59Z", "2000-12-31T23:59:59Z");
+      doTestPointFieldSortError(field, "w/o docValues", "1995-12-31T23:59:59Z");
+      doTestPointFieldSortError(field, "w/o docValues", "1995-12-31T23:59:59Z", "2000-12-31T23:59:59Z");
       doTestPointFieldFunctionQueryError(field, "multivalued", "1995-12-31T23:59:59Z");
       doTestPointFieldFunctionQueryError(field, "multivalued", "1995-12-31T23:59:59Z", "2000-12-31T23:59:59Z");
                                 
     }
-    
+
+    // multivalued, w/ docValues
+    for (String r : Arrays.asList("*_p_dt_ni_mv_dv", "*_p_dt_ni_dv_ns_mv",
+                                  "*_p_dt_dv_ns_mv", "*_p_dt_mv_dv",
+                                  "*_p_dt_mv_dv_smf", "*_p_dt_ni_mv_dv_smf",
+                                  "*_p_dt_mv_dv_sml", "*_p_dt_ni_mv_dv_sml")) {
+      assertTrue(r, regexToTest.remove(r));
+      String field = r.replace("*", "number");
+
+      // NOTE: only testing one value per doc here, but TestMinMaxOnMultiValuedField
+      // covers this in more depth
+      doTestPointFieldSort(field, sequential);
+      doTestPointFieldSort(field, randomDates);
+
+      // value source (w/o field(...,min|max)) usage should still error...
+      doTestPointFieldFunctionQueryError(field, "multivalued", "1995-12-31T23:59:59Z");
+      doTestPointFieldFunctionQueryError(field, "multivalued", "1995-12-31T23:59:59Z", "2000-12-31T23:59:59Z");
+    }    
     assertEquals("Missing types in the test", Collections.<String>emptySet(), regexToTest);
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/core/src/test/org/apache/solr/search/function/TestMinMaxOnMultiValuedField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/function/TestMinMaxOnMultiValuedField.java b/solr/core/src/test/org/apache/solr/search/function/TestMinMaxOnMultiValuedField.java
index f0dac60..a90d51b 100644
--- a/solr/core/src/test/org/apache/solr/search/function/TestMinMaxOnMultiValuedField.java
+++ b/solr/core/src/test/org/apache/solr/search/function/TestMinMaxOnMultiValuedField.java
@@ -16,6 +16,11 @@
  */
 package org.apache.solr.search.function;
 
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.SolrTestCaseJ4;
@@ -27,9 +32,14 @@ import org.apache.solr.schema.IndexSchema;
 import org.apache.solr.schema.IntValueFieldType;
 import org.apache.solr.schema.LongValueFieldType;
 import org.apache.solr.schema.SchemaField;
+import org.apache.solr.schema.TrieField;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+/**
+ * Tests the behavior of <code>field(foo,min|max)</code> on numerous types of multivalued 'foo' fields,
+ * as well as the behavior of sorting on <code>foo asc|desc</code> to implicitly choose the min|max.
+ */
 @SuppressCodecs({"Memory", "SimpleText"}) // see TestSortedSetSelector
 public class TestMinMaxOnMultiValuedField extends SolrTestCaseJ4 {
 
@@ -40,6 +50,16 @@ public class TestMinMaxOnMultiValuedField extends SolrTestCaseJ4 {
     initCore("solrconfig-functionquery.xml","schema11.xml");
     checkFields(new String[] {"i", "l", "f", "d"}, new String [] {"_p", "_ni_p"});
     checkFields(new String[] {"ti", "tl", "tf", "td"}, new String [] {"", "_dv", "_ni_dv"});
+    checkFields(new String[] {"str", // no expectation on missing first/last
+                              "str_missf_", "str_missl_",
+                              "int_missf_", "int_missl_",
+                              "long_missf_", "long_missl_",
+                              "float_missf_", "float_missl_",
+                              "double_missf_", "double_missl_",
+                              "date_missf_", "date_missl_",
+                              "enum_missf_", "enum_missl_",
+                              "bool_missf_", "bool_missl_"  }, new String [] {"_dv"});
+      
   }
   
   private static void checkFields(String[] types, String[] suffixes) {
@@ -55,6 +75,16 @@ public class TestMinMaxOnMultiValuedField extends SolrTestCaseJ4 {
                      || Boolean.getBoolean(NUMERIC_DOCVALUES_SYSPROP), sf.hasDocValues());
         assertEquals(f + " doesn't have expected index status",
                      ! f.contains("ni"), sf.indexed());
+
+        if (f.contains("miss")) {
+          // if name contains "miss" assert that the missing first/last props match
+          // but don't make any asserts about fields w/o that in name
+          // (schema11.xml's strings have some preexisting silliness that doesn't affect us)
+          assertEquals(f + " sortMissingFirst is wrong",
+                       f.contains("missf"), sf.sortMissingFirst());
+          assertEquals(f + " sortMissingLast is wrong",
+                       f.contains("missl"), sf.sortMissingLast());
+        }
       }
     }
   }
@@ -62,7 +92,7 @@ public class TestMinMaxOnMultiValuedField extends SolrTestCaseJ4 {
   /** Deletes all docs (which may be left over from a previous test) */
   @Before
   public void before() throws Exception {
-    assertU(delQ("*:*"));
+    clearIndex();
     assertU(commit());
   }
   
@@ -79,7 +109,7 @@ public class TestMinMaxOnMultiValuedField extends SolrTestCaseJ4 {
     assertTrue("Unexpected float field", h.getCore().getLatestSchema().getField(floatField).getType() instanceof FloatValueFieldType);
     assertTrue("Unexpected double field", h.getCore().getLatestSchema().getField(doubleField).getType() instanceof DoubleValueFieldType);
 
-    assertU(delQ("*:*"));
+    clearIndex();
     assertU(adoc(sdoc("id", "1"
                       // int
                       ,intField, "42"
@@ -145,69 +175,164 @@ public class TestMinMaxOnMultiValuedField extends SolrTestCaseJ4 {
             ,"//double[@name='min_d']='-420.5'"
             ,"//double[@name='max_d']='-40.5'"
             );
+  }
 
+  public void testBasicStrings() {
+    assertU(adoc(sdoc("id", "1",
+                      "val_strs_dv", "dog",
+                      "val_strs_dv", "xyz",
+                      "val_strs_dv", "cat")));
+    assertU(adoc(sdoc("id", "2"))); // 2 has no val_strs_dv values
+    assertU(commit());
 
+    // id=1: has values
+    assertQ(req("q","id:1"
+                ,"fl","exists_min_str:exists(field(val_strs_dv,min))"
+                ,"fl","exists_max_str:exists(field(val_strs_dv,max))"
+                ,"fl","min_str:field(val_strs_dv,min)"
+                ,"fl","max_str:field(val_strs_dv,max)"
+                
+                )
+            ,"//*[@numFound='1']"
+            ,"//bool[@name='exists_min_str']='true'"
+            ,"//bool[@name='exists_max_str']='true'"
+            ,"//str[@name='min_str']='cat'"
+            ,"//str[@name='max_str']='xyz'"
+            );
+    // id=2: no values
+    assertQ(req("q","id:2"
+                ,"fl","exists_min_str:exists(field(val_strs_dv,min))"
+                ,"fl","exists_max_str:exists(field(val_strs_dv,max))"
+                ,"fl","min_str:field(val_strs_dv,min)"
+                ,"fl","max_str:field(val_strs_dv,max)"
+                
+                )
+            ,"//*[@numFound='1']"
+            ,"//bool[@name='exists_min_str']='false'"
+            ,"//bool[@name='exists_max_str']='false'"
+            ,"count(//*[@name='min_str'])=0"
+            ,"count(//*[@name='max_str'])=0"
+            );
   }
 
+  public void testExpectedSortOrderingStrings() {
+    testExpectedSortOrdering("val_strs_dv", false,
+                             null, "a", "cat", "dog", "wako", "xyz", "zzzzz");
+  }
+
+  public void testExpectedSortMissingOrderings() {
+
+    // NOTE: we never test the "true" min/max value for a type, because
+    // (in this simple test) we aren't using a secondary sort, so there is no way to disambiguate
+    // docs that have those values from docs that have those *effective* sort values
+
+    testSortMissingMinMax("val_str", "a", "aaaaaa", "xxxxx", "zzzzzzzzzzzzzzzzzzz");
+    testSortMissingMinMax("val_int",
+                          Integer.MIN_VALUE+1L, -9999, 0, 99999, Integer.MAX_VALUE-1L);
+    testSortMissingMinMax("val_long",
+                          Long.MIN_VALUE+1L, -99999999L, 0, 9999999999L, Long.MAX_VALUE-1L);
+    testSortMissingMinMax("val_float",
+                          Math.nextAfter(Float.NEGATIVE_INFINITY, 0F), -99.99F,
+                          0F, 99.99F, Math.nextAfter(Float.POSITIVE_INFINITY, 0F));
+    testSortMissingMinMax("val_double",
+                          Math.nextAfter(Double.NEGATIVE_INFINITY, 0D), -99.99D, 
+                          0D, 99.99D, Math.nextAfter(Double.POSITIVE_INFINITY, 0F));
+    testSortMissingMinMax("val_date",
+                          "-1000000-01-01T00:00:00Z", "NOW-1YEAR", "NOW", "NOW+1YEAR", "+1000000-01-01T00:00:00Z");
+    testSortMissingMinMax("val_bool", false, true);
+    testSortMissingMinMax("val_enum", "Not Available", "Low", "High", "Critical");
+
+  }
+  
+
   @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-6709")
   public void testIntFieldCache() {
     testSimpleInt("val_tis");
+    testExpectedSortOrderingInt("val_tis", true);
   }
   
   public void testPointInt() {
     testSimpleInt("val_is_p");
     testSimpleInt("val_is_ni_p");
+    
+    testExpectedSortOrderingInt("val_is_p", false);
+    testExpectedSortOrderingInt("val_is_ni_p", false);
   }
   
   public void testIntDocValues() {
     testSimpleInt("val_tis_dv");
     testSimpleInt("val_tis_ni_dv");
+    
+    testExpectedSortOrderingInt("val_tis_dv", true);
+    testExpectedSortOrderingInt("val_tis_ni_dv", true);
   }
 
   @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-6709")
   public void testLongFieldCache() {
     testSimpleLong("val_tls");
+    testExpectedSortOrderingLong("val_tls", true);
   }
   
   public void testLongDocValues() {
     testSimpleLong("val_tls_dv");
     testSimpleLong("val_tls_ni_dv");
+    
+    testExpectedSortOrderingLong("val_tls_dv", true);
+    testExpectedSortOrderingLong("val_tls_ni_dv", true);
   }
   
   public void testPointLong() {
     testSimpleLong("val_ls_p");
     testSimpleLong("val_ls_ni_p");
+    
+    testExpectedSortOrderingLong("val_ls_p", false);
+    testExpectedSortOrderingLong("val_ls_ni_p", false);
   }
 
 
   @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-6709")
   public void testFloatFieldCache() {
     testSimpleFloat("val_tfs");
+    testExpectedSortOrderingFloat("val_tfs", true);
   }
   
   public void testFloatDocValues() {
     testSimpleFloat("val_tfs_dv");
     testSimpleFloat("val_tfs_ni_dv");
+    
+    testExpectedSortOrderingFloat("val_tfs_dv", true);
+    testExpectedSortOrderingFloat("val_tfs_ni_dv", true);
   }
   
   public void testPointFloat() {
     testSimpleFloat("val_fs_p");
     testSimpleFloat("val_fs_ni_p");
+    
+    testExpectedSortOrderingFloat("val_fs_p", false);
+    testExpectedSortOrderingFloat("val_fs_ni_p", false);
   }
   
   @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-6709")
   public void testDoubleFieldCache() {
     testSimpleDouble("val_tds");
+    
+    testExpectedSortOrderingDouble("val_tds", true);
   }
   
   public void testDoubleDocValues() {
     testSimpleDouble("val_tds_dv");
     testSimpleDouble("val_tds_ni_dv");
+    
+    testExpectedSortOrderingDouble("val_tds_dv", true);
+    testExpectedSortOrderingDouble("val_tds_ni_dv", true);
   }
 
   public void testPointDouble() {
     testSimpleDouble("val_ds_p");
     testSimpleDouble("val_ds_ni_p");
+    
+    testExpectedSortOrderingDouble("val_ds_p", false);
+    testExpectedSortOrderingDouble("val_ds_ni_p", false);
   }
 
   public void testBadRequests() {
@@ -240,13 +365,22 @@ public class TestMinMaxOnMultiValuedField extends SolrTestCaseJ4 {
               SolrException.ErrorCode.BAD_REQUEST);
     
     // useful error if min/max is unsupported for fieldtype
-    assertQEx("no error asking for max on a str field",
-              "cat_docValues",
-              req("q","*:*", "fl", "field(cat_docValues,'max')"),
+    assertQEx("no error mentioning field name when asking for max on type that doesn't support it",
+              "cat_length",
+              req("q","*:*", "fl", "field(cat_length,'max')"),
+              SolrException.ErrorCode.BAD_REQUEST);
+    assertQEx("no error mentioning type when asking for max on type that doesn't support it",
+              "text_length",
+              req("q","*:*", "fl", "field(cat_length,'max')"),
+              SolrException.ErrorCode.BAD_REQUEST);
+    // type supports, but field doesn't have docValues
+    assertQEx("no error mentioning field name when asking for max on a non-dv str field",
+              "cat",
+              req("q","*:*", "fl", "field(cat,'max')"),
               SolrException.ErrorCode.BAD_REQUEST);
-    assertQEx("no error asking for max on a str field",
-              "string",
-              req("q","*:*", "fl", "field(cat_docValues,'max')"),
+    assertQEx("no error mentioning 'docValues' when asking for max on a non-dv str field",
+              "docValues",
+              req("q","*:*", "fl", "field(cat,'max')"),
               SolrException.ErrorCode.BAD_REQUEST);
     
   }
@@ -501,5 +635,264 @@ public class TestMinMaxOnMultiValuedField extends SolrTestCaseJ4 {
             ,"//result/doc["+numDocs+"]/str[@name='id']='0'"
             );
   }
+
+  /** @see #testExpectedSortOrdering */
+  private void testExpectedSortOrderingInt(final String f, final boolean trieFieldHack) {
+    // first a quick test where every doc has a value
+    testExpectedSortOrdering(f, trieFieldHack,
+                             Integer.MIN_VALUE, -9999, 0, 1000, Integer.MAX_VALUE);
+
+    // now where one doc has no values
+    testExpectedSortOrdering(f, trieFieldHack,
+                             Integer.MIN_VALUE, -9999, -42, -15, -3,
+                             null, 7, 53, 1000, 121212112, Integer.MAX_VALUE);
+  }
+  
+  /** @see #testExpectedSortOrdering */
+  private void testExpectedSortOrderingLong(final String f, final boolean trieFieldHack) {
+    // first a quick test where every doc has a value
+    testExpectedSortOrdering(f, trieFieldHack,
+                             Long.MIN_VALUE, -4200L, 0, 121212112, Long.MAX_VALUE);
+
+    // now where one doc has no values
+    testExpectedSortOrdering(f, trieFieldHack,
+                             Long.MIN_VALUE, ((long)Integer.MIN_VALUE)-1L, -4200L,
+                             -150L, -3L, null, 70L, 530L, 121212112,
+                             1L+(long)Integer.MAX_VALUE, Long.MAX_VALUE);
+                                           
+  }
+  
+  /** @see #testExpectedSortOrdering */
+  private void testExpectedSortOrderingFloat(final String f, final boolean trieFieldHack) {
+    // first a quick test where every doc has a value
+    testExpectedSortOrdering(f, trieFieldHack,
+                             Float.NEGATIVE_INFINITY, -15.0, 0F, 121212.112, Float.POSITIVE_INFINITY);
+
+    // now where one doc has no values
+    testExpectedSortOrdering(f, trieFieldHack,
+                             Float.NEGATIVE_INFINITY, -9999.999, -42.3, -15.0, -0.3,
+                             null, 0.7, 5.3, 1000, 121212.112, Float.POSITIVE_INFINITY);
+                             
+  }
+  
+  /** @see #testExpectedSortOrdering */
+  private void testExpectedSortOrderingDouble(final String f, final boolean trieFieldHack) {
+    // first a quick test where every doc has a value
+    testExpectedSortOrdering(f, trieFieldHack,
+                             Double.NEGATIVE_INFINITY, -9999.999D,
+                             0D, 121212.112D, Double.POSITIVE_INFINITY);
+
+    // now where one doc has no values
+    testExpectedSortOrdering(f, trieFieldHack,
+                             Double.NEGATIVE_INFINITY, -9999.999D, -42.3D, -15.0D, -0.3D,
+                             null, 0.7D, 5.3D, 1000, 121212.112D, Double.POSITIVE_INFINITY);
+  }
+
+  /**
+   * Given a <code>fieldPrefix</code> and a list of sorted values which must <em>not</em> contain null, this method tests that sortMissingLast and sortMissingFirst fields using those prefixes sort correctly when {@link #buildMultiValueSortedDocuments} is used to generate documents containing these values <em>and</em> an additional document with no values in the field.
+   *
+   * <p>
+   * Permutations tested:
+   * </p>
+   * <ul>
+   *  <li><code>fieldPrefix</code> + <code>"_missf_s_dv"</code> asc</li>
+   *  <li><code>fieldPrefix</code> + <code>"_missf_s_dv"</code> desc</li>
+   *  <li><code>fieldPrefix</code> + <code>"_missl_s_dv"</code> asc</li>
+   *  <li><code>fieldPrefix</code> + <code>"_missl_s_dv"</code> desc</li>
+   * </ul>
+   *
+   * @see #buildMultiValueSortedDocuments
+   * @see #testExpectedSortOrdering(String,List)
+   */
+  private void testSortMissingMinMax(final String fieldPrefix,
+                                     Object... sortedValues) {
+
+    for (Object obj : sortedValues) { // sanity check
+      assertNotNull("this helper method can't be used with 'null' values", obj);
+    }
+    
+    for (String suffix : Arrays.asList("_missf_s_dv", "_missl_s_dv")) {
+
+      final String f = fieldPrefix + suffix;
+      final boolean first = f.contains("missf");
+    
+      final List<Object> asc_vals = new ArrayList<>(sortedValues.length + 1);
+      Collections.addAll(asc_vals, sortedValues);
+      final List<Object> desc_vals = new ArrayList<>(sortedValues.length + 1);
+      Collections.addAll(desc_vals, sortedValues);
+      Collections.reverse(desc_vals);
+      
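+      // a sortMissingFirst field sorts the doc with no values first regardless of
+      // direction, and a sortMissingLast field sorts it last -- so the null
+      // placeholder goes at the same end of both the asc and desc expectations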
+      asc_vals.add(first ? 0 : sortedValues.length, null);
+      desc_vals.add(first ? 0 : sortedValues.length, null);
+      
+      testExpectedSortOrdering(f + " asc", buildMultiValueSortedDocuments(f, asc_vals));
+      testExpectedSortOrdering(f + " desc", buildMultiValueSortedDocuments(f, desc_vals));
+    }
+  }
+
+  /**
+   * Given a (multivalued) field name and an (ascending) sorted list of values, this method uses {@link #buildMultiValueSortedDocuments} to generate and test multiple function &amp; sort permutations ...
+   * <ul>
+   *  <li><code>f asc</code> (implicitly min)</li>
+   *  <li><code>field(f,min) asc</code></li>
+   *  <li><code>field(f,min) desc</code></li>
+   *  <li><code>f desc</code> (implicitly max)</li>
+   *  <li><code>field(f,max) desc</code></li>
+   *  <li><code>field(f,max) asc</code></li>
+   * </ul>
+   *
+   * <p>
+   * <b>NOTE:</b> if the sortedValues includes "null" then the field must <em>NOT</em> use <code>sortMissingFirst</code> or <code>sortMissingLast</code>
+   * </p>
+   *
+   * @param f the field to test
+   * @param trieFieldHack if this param is true, {@link #NUMERIC_POINTS_SYSPROP} is false, and the <code>sortedValues</code> include a null, then the <code>field(f,min|max)</code> functions will be wrapped in <code>def(...,0)</code> and the implicit <code>f asc|desc</code> syntax will not be tested -- see SOLR-8005 for the reason.
+   * @param sortedValues the values to use when building the docs and validating the sort
+   *
+   * @see #buildMultiValueSortedDocuments
+   * @see #testExpectedSortOrdering(String,List)
+   * @see #clearIndex
+   */
+  private void testExpectedSortOrdering(final String f, boolean trieFieldHack,
+                                        Object... sortedValues) {
+
+    SchemaField sf = h.getCore().getLatestSchema().getField(f);
+    assertFalse("this utility method does not work with fields that are sortMissingFirst|Last: " + f,
+                sf.sortMissingFirst() || sf.sortMissingLast());
+    
+    // make a copy we can re-order later
+    final List<Object> vals = new ArrayList<Object>(sortedValues.length);
+    Collections.addAll(vals, sortedValues);
+      
+    String minFunc = "field("+f+",min)";
+    String maxFunc = "field("+f+",max)";
+
+    if (Boolean.getBoolean(NUMERIC_POINTS_SYSPROP)) {
+      // we don't need to mess with this hack at all if we're using all point numerics
+      trieFieldHack = false;
+    }
+
+    if (trieFieldHack // SOLR-8005
+        // if this line of code stops compiling, then trie fields have been removed from solr
+        // and the entire trieFieldHack param should be removed from this method (and callers)
+        && null != TrieField.class) {
+      
+      // the SOLR-8005 hack is only needed if/when a doc has no value...
+      trieFieldHack = false; // assume we're safe
+      for (Object val : vals) {
+        if (null == val) { // we're not safe
+          trieFieldHack = true;
+          break;
+        }
+      }
+    }
+    if (trieFieldHack) {
+      // if we've made it this far, and we still need the hack, we have to wrap our
+      // functions with a default...
+      minFunc = "def(" + minFunc + ",0)";
+      maxFunc = "def(" + maxFunc + ",0)";
+      // and we can't test implicit min/max default behavior...
+    }
+    
+    // // // // min
+    
+    final List<SolrInputDocument> min_asc = buildMultiValueSortedDocuments(f, vals);
+    
+    // explicit min + asc
+    testExpectedSortOrdering(minFunc + " asc", min_asc);
+    // implicit: asc -> min
+    if (!trieFieldHack) testExpectedSortOrdering(f + " asc", min_asc);
+    
+    final List<SolrInputDocument> min_desc = new ArrayList<>(min_asc);
+    Collections.reverse(min_desc);
+    
+    // explicit min + desc
+    testExpectedSortOrdering(minFunc + " desc", min_desc);
+
+    // // // // max
+    Collections.reverse(vals);
+    
+    final List<SolrInputDocument> max_desc = buildMultiValueSortedDocuments(f, vals);
+
+    // explicit: max + desc
+    testExpectedSortOrdering(maxFunc +" desc", max_desc);
+    // implicit: desc -> max
+    if (!trieFieldHack) testExpectedSortOrdering(f + " desc", max_desc); 
+    
+    final List<SolrInputDocument> max_asc = new ArrayList<>(max_desc);
+    Collections.reverse(max_asc);
+    
+    // explicit max + asc
+    testExpectedSortOrdering(maxFunc + " asc", max_asc);
+  }
   
+  /**
+   * Given a sort clause, and a list of documents in sorted order, this method will clear the index 
+   * and then add the documents in a random order (to ensure the index insertion order is not a factor) 
+   * and then validate that a <code>*:*</code> query returns the documents in the original order.
+   *
+   * @see #buildMultiValueSortedDocuments
+   * @see #clearIndex
+   */   
+  private void testExpectedSortOrdering(final String sort,
+                                        final List<SolrInputDocument> sortedDocs) {
+    clearIndex();
+
+    // shuffle a copy of the doc list (to ensure index order isn't linked to uniqueKey order)
+    List<SolrInputDocument> randOrderedDocs = new ArrayList<>(sortedDocs);
+    Collections.shuffle(randOrderedDocs, random());
+
+    for (SolrInputDocument doc : randOrderedDocs) {
+      assertU(adoc(doc));
+    }
+    assertU(commit());
+
+    // now use the original sorted docs to build up the expected sort order as a list of xpaths
+    List<String> xpaths = new ArrayList<>(sortedDocs.size() + 1);
+    xpaths.add("//result[@numFound='"+sortedDocs.size()+"']");
+    int seq = 0;
+    for (SolrInputDocument doc : sortedDocs) {
+      xpaths.add("//result/doc["+(++seq)+"]/str[@name='id']='"+doc.getFieldValue("id")+"'");
+    }
+    assertQ(req("q", "*:*", "rows", "" + sortedDocs.size(), "sort", sort),
+            xpaths.toArray(new String[xpaths.size()]));
+  }
+
+  /**
+   * Given a (multivalued) field name and an (ascending) sorted list of values, this method will generate a List of Solr Documents of the same size such that:
+   * <ul>
+   *  <li>For each non-null value in the original list, the corresponding document in the result will have that value in the specified field.</li>
+   *  <li>For each null value in the original list, the corresponding document in the result will have <em>NO</em> values in the specified field.</li>
+   *  <li>If a document has a value in the field, then some random number of values that come <em>after</em> that value in the original list may also be included in the specified field. (Since later values sort "worse" in the intended direction, these extras never change a document's effective min/max sort value.)</li>
+   *  <li>Every document in the result will have a randomly assigned 'id', unique relative to all other documents in the result.</li>
+   * </ul>
+   */
+  private static final List<SolrInputDocument> buildMultiValueSortedDocuments(final String f,
+                                                                              final List<Object> vals) {
+    // build a list of docIds that we can shuffle (so the id order doesn't match the value order)
+    List<Integer> ids = new ArrayList<>(vals.size());
+    for (int i = 0; i < vals.size(); i++) {
+      ids.add(i+1);
+    }
+    Collections.shuffle(ids, random());
+    
+    final List<SolrInputDocument> docs = new ArrayList<>(vals.size());
+    for (int i = 0; i < vals.size(); i++) {
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", ids.get(i));
+      Object primaryValue = vals.get(i);
+      if (null != primaryValue) {
+        doc.addField(f, primaryValue);
+        final int extraValCount = random().nextInt(vals.size() - i);
+        for (int j = 0; j < extraValCount; j++) {
+          Object extraVal = vals.get(TestUtil.nextInt(random(), i+1, vals.size() - 1));
+          if (null != extraVal) {
+            doc.addField(f, extraVal);
+          }
+        }
+      }
+      docs.add(doc);
+    }
+    return docs;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e2bba98d/solr/solr-ref-guide/src/common-query-parameters.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/common-query-parameters.adoc b/solr/solr-ref-guide/src/common-query-parameters.adoc
index 5986f97..a9d6cec 100644
--- a/solr/solr-ref-guide/src/common-query-parameters.adoc
+++ b/solr/solr-ref-guide/src/common-query-parameters.adoc
@@ -30,27 +30,31 @@ If no defType param is specified, then by default, the <<the-standard-query-pars
 
 == sort Parameter
 
-The `sort` parameter arranges search results in either ascending (`asc`) or descending (`desc`) order. The parameter can be used with either numerical or alphabetical content. The directions can be entered in either all lowercase or all uppercase letters (i.e., both `asc` or `ASC`).
+The `sort` parameter arranges search results in either ascending (`asc`) or descending (`desc`) order. The parameter can be used with either numerical or alphabetical content. The directions can be entered in either all lowercase or all uppercase letters (i.e., both `asc` and `ASC` are accepted).
 
-Solr can sort query responses according to document scores or the value of any field with a single value that is either indexed or uses <<docvalues.adoc#docvalues,DocValues>> (that is, any field whose attributes in the Schema include `multiValued="false"` and either `docValues="true"` or `indexed="true"` – if the field does not have DocValues enabled, the indexed terms are used to build them on the fly at runtime), provided that:
+Solr can sort query responses according to:
 
-* the field is non-tokenized (that is, the field has no analyzer and its contents have been parsed into tokens, which would make the sorting inconsistent), or
+* Document scores
+* <<function-queries.adoc#sort-by-function,Function results>>
+* The value of any primitive field (numerics, string, boolean, dates, etc.) which has `docValues="true"` (or `multiValued="false"` and `indexed="true"`, in which case the indexed terms will be used to build DocValue-like structures on the fly at runtime)
+* A TextField that uses an analyzer (such as the KeywordTokenizer) that produces only a single term.
+** *NOTE:* If you want to be able to sort on a field whose contents you want to tokenize to facilitate searching, <<copying-fields.adoc#copying-fields,use a `copyField` directive>> in the Schema to clone the field. Then search on the field and sort on its clone, as sketched below.
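+
+A minimal schema sketch of that pattern (the field and type names here are only illustrative):
+
+[source,xml]
+----
+<field name="title" type="text_general" indexed="true" stored="true"/>
+<field name="title_sort" type="string" indexed="false" stored="false" docValues="true"/>
+<copyField source="title" dest="title_sort"/>
+----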
 
-* the field uses an analyzer (such as the KeywordTokenizer) that produces only a single term.
+In the case of primitive fields that are `multiValued="true"`, the representative value used for each doc when sorting depends on the sort direction: the minimum value in each document is used for ascending (`asc`) sorting, while the maximum value in each document is used for descending (`desc`) sorting. This default behavior is equivalent to explicitly sorting using the two-argument `<<function-queries.adoc#field-function,field()>>` function: `sort=field(name,min) asc` and `sort=field(name,max) desc`.
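+
+For example, given a multivalued numeric field named `prices` (an illustrative name), the following two sort clauses are equivalent:
+
+[source,text]
+----
+sort=prices asc
+sort=field(prices,min) asc
+----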
 
-If you want to be able to sort on a field whose contents you want to tokenize to facilitate searching, <<copying-fields.adoc#copying-fields,use a `copyField` directive>> in the the Schema to clone the field. Then search on the field and sort on its clone.
-
-The table explains how Solr responds to various settings of the `sort` parameter.
+The table below explains how Solr responds to various settings of the `sort` parameter.
 
 // TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
 
 [cols="30,70",options="header"]
 |===
 |Example |Result
-| |If the sort parameter is omitted, sorting is performed as though the parameter were set to score `desc`.
+| |If the sort parameter is omitted, sorting is performed as though the parameter were set to `score desc`.
 |score desc |Sorts in descending order from the highest score to the lowest score.
 |price asc |Sorts in ascending order of the price field
-|inStock desc, price asc |Sorts by the contents of the `inStock` field in descending order, then within those results sorts in ascending order by the contents of the price field.
+|div(popularity,price) desc |Sorts in descending order of the result of the function `popularity / price`.
+|inStock desc, price asc |Sorts by the contents of the `inStock` field in descending order, then when multiple documents have the same value for the `inStock` field, those results are sorted in ascending order by the contents of the `price` field.
+|categories asc, price asc |Sorts by the lowest value of the (multivalued) `categories` field in ascending order, then when multiple documents have the same lowest `categories` value, those results are sorted in ascending order by the contents of the `price` field.
 |===
 
 Regarding the sort parameter's arguments:


[34/41] lucene-solr:jira/solr-11702: LUCENE-8124: Fixed HyphenationCompoundWordTokenFilter to handle correctly hyphenation patterns with indicator >= 7.

Posted by da...@apache.org.
LUCENE-8124: Fixed HyphenationCompoundWordTokenFilter to handle correctly hyphenation patterns with indicator >= 7.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f5e22670
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f5e22670
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f5e22670

Branch: refs/heads/jira/solr-11702
Commit: f5e2267097df5bee3942c719facbca137a56f3f8
Parents: fc6f3a4
Author: Adrien Grand <jp...@gmail.com>
Authored: Mon Jan 22 08:46:01 2018 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Mon Jan 22 08:46:01 2018 +0100

----------------------------------------------------------------------
 lucene/CHANGES.txt                                   |  3 +++
 .../compound/hyphenation/HyphenationTree.java        |  4 ++--
 .../compound/TestCompoundWordTokenFilter.java        | 15 +++++++++++++++
 3 files changed, 20 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f5e22670/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 6b90215..e95d066 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -146,6 +146,9 @@ Bug Fixes
 
 * LUCENE-8130: Fix NullPointerException from TermStates.toString() (Mike McCandless)
 
+* LUCENE-8124: Fixed HyphenationCompoundWordTokenFilter to handle correctly
+  hyphenation patterns with indicator >= 7. (Holger Bruch via Adrien Grand)
+
 Other
 
 * LUCENE-8111: IndexOrDocValuesQuery Javadoc references outdated method name.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f5e22670/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java
index 0f7dd2b..3c72b4f 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java
@@ -89,7 +89,7 @@ public class HyphenationTree extends TernaryTree implements PatternConsumer {
     StringBuilder buf = new StringBuilder();
     byte v = vspace.get(k++);
     while (v != 0) {
-      char c = (char) ((v >>> 4) - 1 + '0');
+      char c = (char) (((v & 0xf0) >>> 4) - 1 + '0');
       buf.append(c);
       c = (char) (v & 0x0f);
       if (c == 0) {
@@ -151,7 +151,7 @@ public class HyphenationTree extends TernaryTree implements PatternConsumer {
     StringBuilder buf = new StringBuilder();
     byte v = vspace.get(k++);
     while (v != 0) {
-      char c = (char) ((v >>> 4) - 1);
+      char c = (char) (((v & 0xf0) >>> 4) - 1);
       buf.append(c);
       c = (char) (v & 0x0f);
       if (c == 0) {
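
Why the mask matters: Java sign-extends byte to int before shifting, so once the
packed nibble reaches 8 (i.e., indicator >= 7) the byte's high bit is set and an
unmasked ">>>" operates on the sign-extended value. A minimal illustration of the
difference (the variable names are illustrative, not from the patch):

    byte v = (byte) 0x81;           // high nibble 8 (indicator 7 + 1), low nibble 1
    int bad  = v >>> 4;             // promotes to 0xFFFFFF81 first; yields 0x0FFFFFF8
    int good = (v & 0xf0) >>> 4;    // masks the high nibble first; yields 8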

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f5e22670/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
index ed3abe4..67a1bb4 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
@@ -262,6 +262,21 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
     }
 
   }
+  
+  public void testLucene8124() throws Exception {
+    InputSource is = new InputSource(getClass().getResource("hyphenation-LUCENE-8124.xml").toExternalForm());
+    HyphenationTree hyphenator = HyphenationCompoundWordTokenFilter
+        .getHyphenationTree(is);
+
+    HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(
+        whitespaceMockTokenizer(
+                "Rindfleisch"),
+        hyphenator);
+
+    // TODO Rindfleisch being returned twice is another issue of the HyphenationCompoundWordTokenFilter
+    assertTokenStreamContents(tf, new String[] { "Rindfleisch", "Rind", "Rindfleisch", "fleisch"});
+  }
+
 
   public static interface MockRetainAttribute extends Attribute {
     void setRetain(boolean attr);


[15/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to it's own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
deleted file mode 100644
index daa267d..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.zookeeper.KeeperException;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-@Slow
-public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(3)
-        .addConfig("conf", configset("cloud-minimal"))
-        .configure();
-  }
-
-  @Before
-  public void deleteCollections() throws Exception {
-    cluster.deleteAllCollections();
-  }
-
-  @Test
-  public void testAddTooManyReplicas() throws Exception {
-    final String collectionName = "TooManyReplicasInSeveralFlavors";
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
-        .setMaxShardsPerNode(1)
-        .process(cluster.getSolrClient());
-
-    // I have two replicas, one for each shard
-
-    // Curiously, I should be able to add a bunch of replicas if I specify the node, even more than maxShardsPerNode
-    // Just get the first node any way we can.
-    // Get a node to use for the "node" parameter.
-    String nodeName = getAllNodeNames(collectionName).get(0);
-
-    // Add a replica using the "node" parameter (no "too many replicas check")
-    // this node should have 2 replicas on it
-    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .setNode(nodeName)
-        .process(cluster.getSolrClient());
-
-    // Three replicas so far, should be able to create another one "normally"
-    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .process(cluster.getSolrClient());
-
-    // This one should fail though, no "node" parameter specified
-    Exception e = expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-          .process(cluster.getSolrClient());
-    });
-
-    assertTrue("Should have gotten the right error message back",
-          e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-
-
-    // Oddly, we should succeed next just because setting property.name will not check for nodes being "full up"
-    // TODO: Isn't this a bug?
-    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .withProperty("name", "bogus2")
-        .setNode(nodeName)
-        .process(cluster.getSolrClient());
-
-    DocCollection collectionState = getCollectionState(collectionName);
-    Slice slice = collectionState.getSlice("shard1");
-    Replica replica = getRandomReplica(slice, r -> r.getCoreName().equals("bogus2"));
-    assertNotNull("Should have found a replica named 'bogus2'", replica);
-    assertEquals("Replica should have been put on correct core", nodeName, replica.getNodeName());
-
-    // Shard1 should have 4 replicas
-    assertEquals("There should be 4 replicas for shard 1", 4, slice.getReplicas().size());
-
-    // And let's fail one more time because to ensure that the math doesn't do weird stuff it we have more replicas
-    // than simple calcs would indicate.
-    Exception e2 = expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-          .process(cluster.getSolrClient());
-    });
-
-    assertTrue("Should have gotten the right error message back",
-        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-
-    // wait for recoveries to finish, for a clean shutdown - see SOLR-9645
-    waitForState("Expected to see all replicas active", collectionName, (n, c) -> {
-      for (Replica r : c.getReplicas()) {
-        if (r.getState() != Replica.State.ACTIVE)
-          return false;
-      }
-      return true;
-    });
-  }
-
-  @Test
-  public void testAddShard() throws Exception {
-
-    String collectionName = "TooManyReplicasWhenAddingShards";
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 2)
-        .setMaxShardsPerNode(2)
-        .process(cluster.getSolrClient());
-
-    // We have two nodes, maxShardsPerNode is set to 2. Therefore, we should be able to add 2 shards each with
-    // two replicas, but fail on the third.
-    CollectionAdminRequest.createShard(collectionName, "shard1")
-        .process(cluster.getSolrClient());
-
-    // Now we should have one replica on each Jetty, add another to reach maxShardsPerNode
-    CollectionAdminRequest.createShard(collectionName, "shard2")
-        .process(cluster.getSolrClient());
-
-    // Now fail to add the third as it should exceed maxShardsPerNode
-    Exception e = expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.createShard(collectionName, "shard3")
-          .process(cluster.getSolrClient());
-    });
-    assertTrue("Should have gotten the right error message back",
-        e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-
-    // Hmmm, providing a nodeset also overrides the checks for max replicas, so prove it.
-    List<String> nodes = getAllNodeNames(collectionName);
-
-    CollectionAdminRequest.createShard(collectionName, "shard4")
-        .setNodeSet(StringUtils.join(nodes, ","))
-        .process(cluster.getSolrClient());
-
-    // And just for yucks, insure we fail the "regular" one again.
-    Exception e2 = expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.createShard(collectionName, "shard5")
-          .process(cluster.getSolrClient());
-    });
-    assertTrue("Should have gotten the right error message back",
-        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-
-    // And finally, insure that there are all the replcias we expect. We should have shards 1, 2 and 4 and each
-    // should have exactly two replicas
-    waitForState("Expected shards shardstart, 1, 2 and 4, each with two active replicas", collectionName, (n, c) -> {
-      return DocCollection.isFullyActive(n, c, 4, 2);
-    });
-    Map<String, Slice> slices = getCollectionState(collectionName).getSlicesMap();
-    assertEquals("There should be exaclty four slices", slices.size(), 4);
-    assertNotNull("shardstart should exist", slices.get("shardstart"));
-    assertNotNull("shard1 should exist", slices.get("shard1"));
-    assertNotNull("shard2 should exist", slices.get("shard2"));
-    assertNotNull("shard4 should exist", slices.get("shard4"));
-    assertEquals("Shardstart should have exactly 2 replicas", 2, slices.get("shardstart").getReplicas().size());
-    assertEquals("Shard1 should have exactly 2 replicas", 2, slices.get("shard1").getReplicas().size());
-    assertEquals("Shard2 should have exactly 2 replicas", 2, slices.get("shard2").getReplicas().size());
-    assertEquals("Shard4 should have exactly 2 replicas", 2, slices.get("shard4").getReplicas().size());
-
-  }
-
-  @Test
-  public void testDownedShards() throws Exception {
-    String collectionName = "TooManyReplicasWhenAddingDownedNode";
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 1)
-        .setMaxShardsPerNode(2)
-        .process(cluster.getSolrClient());
-
-    // Shut down a Jetty, I really don't care which
-    JettySolrRunner jetty = cluster.getRandomJetty(random());
-    String deadNode = jetty.getBaseUrl().toString();
-    cluster.stopJettySolrRunner(jetty);
-
-    try {
-
-      // Adding a replica on a dead node should fail
-      Exception e1 = expectThrows(Exception.class, () -> {
-        CollectionAdminRequest.addReplicaToShard(collectionName, "shardstart")
-            .setNode(deadNode)
-            .process(cluster.getSolrClient());
-      });
-      assertTrue("Should have gotten a message about shard not currently active: " + e1.toString(),
-          e1.toString().contains("At least one of the node(s) specified [" + deadNode + "] are not currently active in"));
-
-      // Should also die if we just add a shard
-      Exception e2 = expectThrows(Exception.class, () -> {
-        CollectionAdminRequest.createShard(collectionName, "shard1")
-            .setNodeSet(deadNode)
-            .process(cluster.getSolrClient());
-      });
-
-      assertTrue("Should have gotten a message about shard not currently active: " + e2.toString(),
-          e2.toString().contains("At least one of the node(s) specified [" + deadNode + "] are not currently active in"));
-    }
-    finally {
-      cluster.startJettySolrRunner(jetty);
-    }
-  }
-
-  private List<String> getAllNodeNames(String collectionName) throws KeeperException, InterruptedException {
-    DocCollection state = getCollectionState(collectionName);
-    return state.getReplicas().stream().map(Replica::getNodeName).distinct().collect(Collectors.toList());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
deleted file mode 100644
index c3dc44b..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Tests the Cloud Collections API.
- */
-@Slow
-public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
-
-  private static final int MAX_TIMEOUT_SECONDS = 60;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(2)
-        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
-        .configure();
-  }
-
-  @Test
-  public void testSolrJAPICalls() throws Exception {
-
-    final CloudSolrClient client = cluster.getSolrClient();
-
-    RequestStatusState state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("Recreating a collection with the same should have failed.", RequestStatusState.FAILED, state);
-
-    state = CollectionAdminRequest.addReplicaToShard("testasynccollectioncreation", "shard1")
-      .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("Add replica did not complete", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.splitShard("testasynccollectioncreation")
-        .setShardName("shard1")
-        .processAndWait(client, MAX_TIMEOUT_SECONDS * 2);
-    assertEquals("Shard split did not complete. Last recorded state: " + state, RequestStatusState.COMPLETED, state);
-
-  }
-
-  @Test
-  public void testAsyncRequests() throws Exception {
-
-    final String collection = "testAsyncOperations";
-    final CloudSolrClient client = cluster.getSolrClient();
-
-    RequestStatusState state = CollectionAdminRequest.createCollection(collection,"conf1",1,1)
-        .setRouterName("implicit")
-        .setShards("shard1")
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
-
-    //Add a few documents to shard1
-    int numDocs = TestUtil.nextInt(random(), 10, 100);
-    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
-    for (int i=0; i<numDocs; i++) {
-      SolrInputDocument doc = new SolrInputDocument();
-      doc.addField("id", i);
-      doc.addField("_route_", "shard1");
-      docs.add(doc);
-    }
-    client.add(collection, docs);
-    client.commit(collection);
-
-    SolrQuery query = new SolrQuery("*:*");
-    query.set("shards", "shard1");
-    assertEquals(numDocs, client.query(collection, query).getResults().getNumFound());
-
-    state = CollectionAdminRequest.reloadCollection(collection)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("ReloadCollection did not complete", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.createShard(collection,"shard2")
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("CreateShard did not complete", RequestStatusState.COMPLETED, state);
-
-    //Add a doc to shard2 to make sure shard2 was created properly
-    SolrInputDocument doc = new SolrInputDocument();
-    doc.addField("id", numDocs + 1);
-    doc.addField("_route_", "shard2");
-    client.add(collection, doc);
-    client.commit(collection);
-    query = new SolrQuery("*:*");
-    query.set("shards", "shard2");
-    assertEquals(1, client.query(collection, query).getResults().getNumFound());
-
-    state = CollectionAdminRequest.deleteShard(collection,"shard2").processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("DeleteShard did not complete", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
-      .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("AddReplica did not complete", RequestStatusState.COMPLETED, state);
-
-    //cloudClient watch might take a couple of seconds to reflect it
-    Slice shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
-    int count = 0;
-    while (shard1.getReplicas().size() != 2) {
-      if (count++ > 1000) {
-        fail("2nd Replica not reflecting in the cluster state");
-      }
-      Thread.sleep(100);
-    }
-
-    state = CollectionAdminRequest.createAlias("myalias",collection)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("CreateAlias did not complete", RequestStatusState.COMPLETED, state);
-
-    query = new SolrQuery("*:*");
-    query.set("shards", "shard1");
-    assertEquals(numDocs, client.query("myalias", query).getResults().getNumFound());
-
-    state = CollectionAdminRequest.deleteAlias("myalias")
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("DeleteAlias did not complete", RequestStatusState.COMPLETED, state);
-
-    try {
-      client.query("myalias", query);
-      fail("Alias should not exist");
-    } catch (SolrException e) {
-      //expected
-    }
-
-    Replica replica = shard1.getReplicas().iterator().next();
-    for (String liveNode : client.getZkStateReader().getClusterState().getLiveNodes()) {
-      if (!replica.getNodeName().equals(liveNode)) {
-        state = new CollectionAdminRequest.MoveReplica(collection, replica.getName(), liveNode)
-            .processAndWait(client, MAX_TIMEOUT_SECONDS);
-        assertSame("MoveReplica did not complete", RequestStatusState.COMPLETED, state);
-        break;
-      }
-    }
-
-    shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
-    String replicaName = shard1.getReplicas().iterator().next().getName();
-    state = CollectionAdminRequest.deleteReplica(collection, "shard1", replicaName)
-      .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("DeleteReplica did not complete", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.deleteCollection(collection)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("DeleteCollection did not complete", RequestStatusState.COMPLETED, state);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
deleted file mode 100644
index 5615918..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
+++ /dev/null
@@ -1,684 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import javax.management.MBeanServer;
-import javax.management.MBeanServerFactory;
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.lang.management.ManagementFactory;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.collect.ImmutableList;
-import org.apache.commons.io.IOUtils;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.request.CoreStatus;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrInfoBean.Category;
-import org.apache.solr.util.LogLevel;
-import org.apache.solr.util.TestInjection;
-import org.apache.solr.util.TimeOut;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-
-/**
- * Tests the Cloud Collections API.
- */
-@Slow
-public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @BeforeClass
-  public static void beforeCollectionsAPIDistributedZkTest() {
-    // we don't want this test to have zk timeouts
-    System.setProperty("zkClientTimeout", "240000");
-    TestInjection.randomDelayInCoreCreation = "true:20";
-    System.setProperty("validateAfterInactivity", "200");
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    String solrXml = IOUtils.toString(CollectionsAPIDistributedZkTest.class.getResourceAsStream("/solr/solr-jmxreporter.xml"), "UTF-8");
-    configureCluster(4)
-        .addConfig("conf", configset("cloud-minimal"))
-        .addConfig("conf2", configset("cloud-minimal-jmx"))
-        .withSolrXml(solrXml)
-        .configure();
-  }
-
-  @Before
-  public void clearCluster() throws Exception {
-    try {
-      cluster.deleteAllCollections();
-    } finally {
-      System.clearProperty("zkClientTimeout");
-    }
-  }
-
-  @Test
-  public void testCreationAndDeletion() throws Exception {
-
-    String collectionName = "created_and_deleted";
-
-    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1).process(cluster.getSolrClient());
-    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient())
-                  .contains(collectionName));
-
-    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
-        .contains(collectionName));
-
-    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
-
-
-  }
-
-  @Test
-  public void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
-    
-    String collectionName = "out_of_sync_collection";
-
-    // manually create a collections zknode
-    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
-
-    CollectionAdminRequest.deleteCollection(collectionName)
-        .process(cluster.getSolrClient());
-
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
-                  .contains(collectionName));
-    
-    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
-
-  }
-
-  @Test
-  public void deletePartiallyCreatedCollection() throws Exception {
-
-    final String collectionName = "halfdeletedcollection";
-
-    assertEquals(0, CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
-        .setCreateNodeSet("")
-        .process(cluster.getSolrClient()).getStatus());
-    String dataDir = createTempDir().toFile().getAbsolutePath();
-    // create a core that simulates something left over from a partially-deleted collection
-    assertTrue(CollectionAdminRequest
-        .addReplicaToShard(collectionName, "shard1")
-        .setDataDir(dataDir)
-        .process(cluster.getSolrClient()).isSuccess());
-
-    CollectionAdminRequest.deleteCollection(collectionName)
-        .process(cluster.getSolrClient());
-
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
-
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
-        .process(cluster.getSolrClient());
-
-    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
-
-  }
-
-  @Test
-  public void deleteCollectionOnlyInZk() throws Exception {
-
-    final String collectionName = "onlyinzk";
-
-    // create the collections node, but nothing else
-    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
-
-    // delete via API - should remove collections node
-    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
-    
-    // now creating that collection should work
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
-        .process(cluster.getSolrClient());
-    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
-
-  }
-
-  @Test
-  public void testBadActionNames() throws Exception {
-
-    // try a bad action
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", "BADACTION");
-    String collectionName = "badactioncollection";
-    params.set("name", collectionName);
-    params.set("numShards", 2);
-    final QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(request);
-    });
-
-  }
-
-  @Test
-  public void testMissingRequiredParameters() {
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    params.set("numShards", 2);
-    // missing required collection parameter
-    final SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(request);
-    });
-  }
-
-  @Test
-  public void testTooManyReplicas() {
-
-    CollectionAdminRequest req = CollectionAdminRequest.createCollection("collection", "conf", 2, 10);
-
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(req);
-    });
-
-  }
-
-  @Test
-  public void testMissingNumShards() {
-
-    // No numShards should fail
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    params.set("name", "acollection");
-    params.set(REPLICATION_FACTOR, 10);
-    params.set("collection.configName", "conf");
-
-    final SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(request);
-    });
-
-  }
-
-  @Test
-  public void testZeroNumShards() {
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    params.set("name", "acollection");
-    params.set(REPLICATION_FACTOR, 10);
-    params.set("numShards", 0);
-    params.set("collection.configName", "conf");
-
-    final SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(request);
-    });
-
-  }
-
-  @Test
-  public void testCreateShouldFailOnExistingCore() throws Exception {
-    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker", "conf", 1, 1)
-        .setCreateNodeSet("")
-        .process(cluster.getSolrClient()).getStatus());
-    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker", "shard1")
-        .setNode(cluster.getJettySolrRunner(0).getNodeName())
-        .setCoreName("halfcollection_shard1_replica_n1")
-        .process(cluster.getSolrClient()).isSuccess());
-
-    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker2", "conf",1, 1)
-        .setCreateNodeSet("")
-        .process(cluster.getSolrClient()).getStatus());
-    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker2", "shard1")
-        .setNode(cluster.getJettySolrRunner(1).getNodeName())
-        .setCoreName("halfcollection_shard1_replica_n1")
-        .process(cluster.getSolrClient()).isSuccess());
-
-    String nn1 = cluster.getJettySolrRunner(0).getNodeName();
-    String nn2 = cluster.getJettySolrRunner(1).getNodeName();
-
-    CollectionAdminResponse resp = CollectionAdminRequest.createCollection("halfcollection", "conf", 2, 1)
-        .setCreateNodeSet(nn1 + "," + nn2)
-        .process(cluster.getSolrClient());
-    
-    SimpleOrderedMap success = (SimpleOrderedMap) resp.getResponse().get("success");
-    SimpleOrderedMap failure = (SimpleOrderedMap) resp.getResponse().get("failure");
-
-    assertNotNull(resp.toString(), success);
-    assertNotNull(resp.toString(), failure);
-    
-    String val1 = success.getVal(0).toString();
-    String val2 = failure.getVal(0).toString();
-    assertTrue(val1.contains("SolrException") || val2.contains("SolrException"));
-  }
-
-  @Test
-  public void testNoConfigSetExist() throws Exception {
-
-    expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.createCollection("noconfig", "conf123", 1, 1)
-          .process(cluster.getSolrClient());
-    });
-
-    TimeUnit.MILLISECONDS.sleep(1000);
-    // the collection should not have been created, since the configset does not exist
-    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("noconfig");
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains("noconfig"));
-  }
-
-  @Test
-  public void testCoresAreDistributedAcrossNodes() throws Exception {
-
-    CollectionAdminRequest.createCollection("nodes_used_collection", "conf", 2, 2)
-        .process(cluster.getSolrClient());
-
-    Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
-
-    List<String> createNodeList = new ArrayList<>();
-    createNodeList.addAll(liveNodes);
-
-    DocCollection collection = getCollectionState("nodes_used_collection");
-    for (Slice slice : collection.getSlices()) {
-      for (Replica replica : slice.getReplicas()) {
-        createNodeList.remove(replica.getNodeName());
-      }
-    }
-
-    assertEquals(createNodeList.toString(), 0, createNodeList.size());
-
-  }
-
-  @Test
-  public void testDeleteNonExistentCollection() throws Exception {
-
-    SolrException e = expectThrows(SolrException.class, () -> {
-      CollectionAdminRequest.deleteCollection("unknown_collection").process(cluster.getSolrClient());
-    });
-
-    // create another collection should still work
-    CollectionAdminRequest.createCollection("acollectionafterbaddelete", "conf", 1, 2)
-        .process(cluster.getSolrClient());
-    waitForState("Collection creation after a bad delete failed", "acollectionafterbaddelete",
-        (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
-  }
-
-  @Test
-  public void testSpecificConfigsets() throws Exception {
-    CollectionAdminRequest.createCollection("withconfigset2", "conf2", 1, 1).process(cluster.getSolrClient());
-    byte[] data = zkClient().getData(ZkStateReader.COLLECTIONS_ZKNODE + "/" + "withconfigset2", null, null, true);
-    assertNotNull(data);
-    ZkNodeProps props = ZkNodeProps.load(data);
-    String configName = props.getStr(ZkController.CONFIGNAME_PROP);
-    assertEquals("conf2", configName);
-  }
-
-  @Test
-  public void testMaxNodesPerShard() throws Exception {
-
-    // test maxShardsPerNode
-    int numLiveNodes = cluster.getJettySolrRunners().size();
-    int numShards = (numLiveNodes/2) + 1;
-    int replicationFactor = 2;
-    int maxShardsPerNode = 1;
-
-    SolrException e = expectThrows(SolrException.class, () -> {
-      CollectionAdminRequest.createCollection("oversharded", "conf", numShards, replicationFactor)
-          .process(cluster.getSolrClient());
-    });
-
-  }
-
-  @Test
-  public void testCreateNodeSet() throws Exception {
-
-    JettySolrRunner jetty1 = cluster.getRandomJetty(random());
-    JettySolrRunner jetty2 = cluster.getRandomJetty(random());
-
-    List<String> baseUrls = ImmutableList.of(jetty1.getBaseUrl().toString(), jetty2.getBaseUrl().toString());
-
-    CollectionAdminRequest.createCollection("nodeset_collection", "conf", 2, 1)
-        .setCreateNodeSet(baseUrls.get(0) + "," + baseUrls.get(1))
-        .process(cluster.getSolrClient());
-
-    DocCollection collectionState = getCollectionState("nodeset_collection");
-    for (Replica replica : collectionState.getReplicas()) {
-      String replicaUrl = replica.getCoreUrl();
-      boolean matchingJetty = false;
-      for (String jettyUrl : baseUrls) {
-        if (replicaUrl.startsWith(jettyUrl))
-          matchingJetty = true;
-      }
-      if (matchingJetty == false)
-        fail("Expected replica to be on " + baseUrls + " but was on " + replicaUrl);
-    }
-
-  }
-
-  @Test
-  public void testCollectionsAPI() throws Exception {
-
-    // create new collections rapid fire
-    int cnt = random().nextInt(TEST_NIGHTLY ? 3 : 1) + 1;
-    CollectionAdminRequest.Create[] createRequests = new CollectionAdminRequest.Create[cnt];
-
-    for (int i = 0; i < cnt; i++) {
-
-      int numShards = TestUtil.nextInt(random(), 0, cluster.getJettySolrRunners().size()) + 1;
-      int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
-      int maxShardsPerNode = (((numShards * replicationFactor) / cluster.getJettySolrRunners().size())) + 1;
-
-      createRequests[i]
-          = CollectionAdminRequest.createCollection("awhollynewcollection_" + i, "conf2", numShards, replicationFactor)
-          .setMaxShardsPerNode(maxShardsPerNode);
-      createRequests[i].processAsync(cluster.getSolrClient());
-    }
-
-    for (int i = 0; i < cnt; i++) {
-      String collectionName = "awhollynewcollection_" + i;
-      final int j = i;
-      waitForState("Expected to see collection " + collectionName, collectionName,
-          (n, c) -> {
-            CollectionAdminRequest.Create req = createRequests[j];
-            return DocCollection.isFullyActive(n, c, req.getNumShards(), req.getReplicationFactor());
-          });
-    }
-
-    cluster.injectChaos(random());
-
-    for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
-      checkInstanceDirs(cluster.getJettySolrRunner(i));
-    }
-
-    String collectionName = createRequests[random().nextInt(createRequests.length)].getCollectionName();
-
-    new UpdateRequest()
-        .add("id", "6")
-        .add("id", "7")
-        .add("id", "8")
-        .commit(cluster.getSolrClient(), collectionName);
-    TimeOut timeOut = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-    while (!timeOut.hasTimedOut()) {
-      try {
-        long numFound = cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound();
-        assertEquals(3, numFound);
-        break;
-      } catch (Exception e) {
-        // Query node can have stale clusterstate
-        log.info("Error when query " + collectionName, e);
-        Thread.sleep(500);
-      }
-    }
-    if (timeOut.hasTimedOut()) {
-      fail("Timeout on query " + collectionName);
-    }
-
-    checkNoTwoShardsUseTheSameIndexDir();
-  }
-
-  @Test
-  public void testCollectionReload() throws Exception {
-
-    final String collectionName = "reloaded_collection";
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2).process(cluster.getSolrClient());
-
-    // get core open times
-    Map<String, Long> urlToTimeBefore = new HashMap<>();
-    collectStartTimes(collectionName, urlToTimeBefore);
-    assertTrue(urlToTimeBefore.size() > 0);
-
-    CollectionAdminRequest.reloadCollection(collectionName).processAsync(cluster.getSolrClient());
-
-    // reloads may take a short while
-    boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
-    assertTrue("some core start times did not change on reload", allTimesAreCorrect);
-  }
-
-  private void checkInstanceDirs(JettySolrRunner jetty) throws IOException {
-    CoreContainer cores = jetty.getCoreContainer();
-    Collection<SolrCore> theCores = cores.getCores();
-    for (SolrCore core : theCores) {
-
-      // look for core props file
-      Path instancedir = (Path) core.getResourceLoader().getInstancePath();
-      assertTrue("Could not find expected core.properties file", Files.exists(instancedir.resolve("core.properties")));
-
-      Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve(core.getName());
-
-      assertTrue("Expected: " + expected + "\nFrom core stats: " + instancedir, Files.isSameFile(expected, instancedir));
-
-    }
-  }
-
-  private boolean waitForReloads(String collectionName, Map<String,Long> urlToTimeBefore) throws SolrServerException, IOException {
-
-
-    TimeOut timeout = new TimeOut(45, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-
-    boolean allTimesAreCorrect = false;
-    while (! timeout.hasTimedOut()) {
-      Map<String,Long> urlToTimeAfter = new HashMap<>();
-      collectStartTimes(collectionName, urlToTimeAfter);
-      
-      boolean retry = false;
-      Set<Entry<String,Long>> entries = urlToTimeBefore.entrySet();
-      for (Entry<String,Long> entry : entries) {
-        Long beforeTime = entry.getValue();
-        Long afterTime = urlToTimeAfter.get(entry.getKey());
-        assertNotNull(afterTime);
-        if (afterTime <= beforeTime) {
-          retry = true;
-          break;
-        }
-
-      }
-      if (!retry) {
-        allTimesAreCorrect = true;
-        break;
-      }
-    }
-    return allTimesAreCorrect;
-  }
-
-  private void collectStartTimes(String collectionName, Map<String,Long> urlToTime)
-      throws SolrServerException, IOException {
-
-    DocCollection collectionState = getCollectionState(collectionName);
-    if (collectionState != null) {
-      for (Slice shard : collectionState) {
-        for (Replica replica : shard) {
-          ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica);
-          CoreStatus coreStatus;
-          try (HttpSolrClient server = getHttpSolrClient(coreProps.getBaseUrl())) {
-            coreStatus = CoreAdminRequest.getCoreStatus(coreProps.getCoreName(), false, server);
-          }
-          long before = coreStatus.getCoreStartTime().getTime();
-          urlToTime.put(coreProps.getCoreUrl(), before);
-        }
-      }
-    } else {
-      throw new IllegalArgumentException("Could not find collection " + collectionName);
-    }
-  }
-  
-  private void checkNoTwoShardsUseTheSameIndexDir() throws Exception {
-    Map<String, Set<String>> indexDirToShardNamesMap = new HashMap<>();
-    
-    List<MBeanServer> servers = new LinkedList<>();
-    servers.add(ManagementFactory.getPlatformMBeanServer());
-    servers.addAll(MBeanServerFactory.findMBeanServer(null));
-    for (final MBeanServer server : servers) {
-      Set<ObjectName> mbeans = new HashSet<>();
-      mbeans.addAll(server.queryNames(null, null));
-      for (final ObjectName mbean : mbeans) {
-
-        try {
-          Map<String, String> props = mbean.getKeyPropertyList();
-          String category = props.get("category");
-          String name = props.get("name");
-          if ((category != null && category.toString().equals(Category.CORE.toString())) &&
-              (name != null && name.equals("indexDir"))) {
-            String indexDir = server.getAttribute(mbean, "Value").toString();
-            String key = props.get("dom2") + "." + props.get("dom3") + "." + props.get("dom4");
-            if (!indexDirToShardNamesMap.containsKey(indexDir)) {
-              indexDirToShardNamesMap.put(indexDir.toString(), new HashSet<>());
-            }
-            indexDirToShardNamesMap.get(indexDir.toString()).add(key);
-          }
-        } catch (Exception e) {
-          // ignore, just continue - probably a "Value" attribute
-          // not found
-        }
-      }
-    }
-    
-    assertTrue(
-        "Something is broken in the assert for no shards using the same indexDir - probably something was changed in the attributes published in the MBean of "
-            + SolrCore.class.getSimpleName() + " : " + indexDirToShardNamesMap,
-        indexDirToShardNamesMap.size() > 0);
-    for (Entry<String,Set<String>> entry : indexDirToShardNamesMap.entrySet()) {
-      if (entry.getValue().size() > 1) {
-        fail("We have shards using the same indexDir. E.g. shards "
-            + entry.getValue().toString() + " all use indexDir "
-            + entry.getKey());
-      }
-    }
-
-  }
-
-  @Test
-  @LogLevel("org.apache.solr.cloud=DEBUG")
-  public void addReplicaTest() throws Exception {
-    String collectionName = "addReplicaColl";
-
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
-        .setMaxShardsPerNode(4)
-        .process(cluster.getSolrClient());
-
-    ArrayList<String> nodeList
-        = new ArrayList<>(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
-    Collections.shuffle(nodeList, random());
-
-    CollectionAdminResponse response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .setNode(nodeList.get(0))
-        .process(cluster.getSolrClient());
-    Replica newReplica = grabNewReplica(response, getCollectionState(collectionName));
-
-    assertEquals("Replica should be created on the right node",
-        cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
-        newReplica.getStr(ZkStateReader.BASE_URL_PROP));
-
-    Path instancePath = createTempDir();
-    response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .withProperty(CoreAdminParams.INSTANCE_DIR, instancePath.toString())
-        .process(cluster.getSolrClient());
-    newReplica = grabNewReplica(response, getCollectionState(collectionName));
-    assertNotNull(newReplica);
-
-    try (HttpSolrClient coreclient = getHttpSolrClient(newReplica.getStr(ZkStateReader.BASE_URL_PROP))) {
-      CoreAdminResponse status = CoreAdminRequest.getStatus(newReplica.getStr("core"), coreclient);
-      NamedList<Object> coreStatus = status.getCoreStatus(newReplica.getStr("core"));
-      String instanceDirStr = (String) coreStatus.get("instanceDir");
-      assertEquals(instanceDirStr, instancePath.toString());
-    }
-
-    //Test to make sure we can't create another replica with an existing core_name of that collection
-    String coreName = newReplica.getStr(CORE_NAME_PROP);
-    SolrException e = expectThrows(SolrException.class, () -> {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", "addreplica");
-      params.set("collection", collectionName);
-      params.set("shard", "shard1");
-      params.set("name", coreName);
-      QueryRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-      cluster.getSolrClient().request(request);
-    });
-
-    assertTrue(e.getMessage().contains("Another replica with the same core name already exists for this collection"));
-
-    // Check that specifying property.name works. DO NOT remove this when the "name" property is deprecated
-    // for ADDREPLICA, this is "property.name". See SOLR-7132
-    response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .withProperty(CoreAdminParams.NAME, "propertyDotName")
-        .process(cluster.getSolrClient());
-
-    newReplica = grabNewReplica(response, getCollectionState(collectionName));
-    assertEquals("'core' should be 'propertyDotName' ", "propertyDotName", newReplica.getStr("core"));
-
-  }
-
-  private Replica grabNewReplica(CollectionAdminResponse response, DocCollection docCollection) {
-    String replicaName = response.getCollectionCoresStatus().keySet().iterator().next();
-    Optional<Replica> optional = docCollection.getReplicas().stream()
-        .filter(replica -> replicaName.equals(replica.getCoreName()))
-        .findAny();
-    if (optional.isPresent()) {
-      return optional.get();
-    }
-    throw new AssertionError("Can not find " + replicaName + " from " + docCollection);
-  }
-
-}

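For reference, the error-path pattern the removed test methods above relied on, as a minimal sketch: issue a Collections API request that is expected to fail and assert that SolrJ surfaces the failure. It assumes a MiniSolrCloudCluster in scope as "cluster" and an uploaded configset named "conf", both test-fixture assumptions rather than part of this commit.

    // Requesting 10 replicas per shard on a small test cluster should be
    // rejected, so the request is expected to throw.
    CollectionAdminRequest.Create req =
        CollectionAdminRequest.createCollection("collection", "conf", 2, 10);
    expectThrows(Exception.class, () -> cluster.getSolrClient().request(req));
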
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java
deleted file mode 100644
index 57d38cd..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Path;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.lucene.util.LuceneTestCase.Nightly;
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.Before;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Nightly
-public class ConcurrentDeleteAndCreateCollectionTest extends SolrTestCaseJ4 {
-  
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  private MiniSolrCloudCluster solrCluster;
-  
-  @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-    solrCluster = new MiniSolrCloudCluster(1, createTempDir(), buildJettyConfig("/solr"));
-  }
-  
-  @Override
-  @After
-  public void tearDown() throws Exception {
-    solrCluster.shutdown();
-    super.tearDown();
-  }
-  
-  public void testConcurrentCreateAndDeleteDoesNotFail() {
-    final AtomicReference<Exception> failure = new AtomicReference<>();
-    final int timeToRunSec = 30;
-    final CreateDeleteCollectionThread[] threads = new CreateDeleteCollectionThread[10];
-    for (int i = 0; i < threads.length; i++) {
-      final String collectionName = "collection" + i;
-      uploadConfig(configset("configset-2"), collectionName);
-      final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
-      final SolrClient solrClient = getHttpSolrClient(baseUrl);
-      threads[i] = new CreateDeleteSearchCollectionThread("create-delete-search-" + i, collectionName, collectionName, 
-          timeToRunSec, solrClient, failure);
-    }
-    
-    startAll(threads);
-    joinAll(threads);
-    
-    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
-  }
-  
-  public void testConcurrentCreateAndDeleteOverTheSameConfig() {
-    final String configName = "testconfig";
-    uploadConfig(configset("configset-2"), configName); // upload config once, to be used by all collections
-    final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
-    final AtomicReference<Exception> failure = new AtomicReference<>();
-    final int timeToRunSec = 30;
-    final CreateDeleteCollectionThread[] threads = new CreateDeleteCollectionThread[2];
-    for (int i = 0; i < threads.length; i++) {
-      final String collectionName = "collection" + i;
-      final SolrClient solrClient = getHttpSolrClient(baseUrl);
-      threads[i] = new CreateDeleteCollectionThread("create-delete-" + i, collectionName, configName,
-                                                    timeToRunSec, solrClient, failure);
-    }
-
-    startAll(threads);
-    joinAll(threads);
-
-    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
-  }
-  
-  private void uploadConfig(Path configDir, String configName) {
-    try {
-      solrCluster.uploadConfigSet(configDir, configName);
-    } catch (IOException | KeeperException | InterruptedException e) {
-      throw new RuntimeException(e);
-    }
-  }
-  
-  private void joinAll(final CreateDeleteCollectionThread[] threads) {
-    for (CreateDeleteCollectionThread t : threads) {
-      try {
-        t.joinAndClose();
-      } catch (InterruptedException e) {
-        Thread.interrupted();
-        throw new RuntimeException(e);
-      }
-    }
-  }
-  
-  private void startAll(final Thread[] threads) {
-    for (Thread t : threads) {
-      t.start();
-    }
-  }
-  
-  private static class CreateDeleteCollectionThread extends Thread {
-    protected final String collectionName;
-    protected final String configName;
-    protected final long timeToRunSec;
-    protected final SolrClient solrClient;
-    protected final AtomicReference<Exception> failure;
-    
-    public CreateDeleteCollectionThread(String name, String collectionName, String configName, long timeToRunSec,
-        SolrClient solrClient, AtomicReference<Exception> failure) {
-      super(name);
-      this.collectionName = collectionName;
-      this.timeToRunSec = timeToRunSec;
-      this.solrClient = solrClient;
-      this.failure = failure;
-      this.configName = configName;
-    }
-    
-    @Override
-    public void run() {
-      final TimeOut timeout = new TimeOut(timeToRunSec, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-      while (! timeout.hasTimedOut() && failure.get() == null) {
-        doWork();
-      }
-    }
-    
-    protected void doWork() {
-      createCollection();
-      deleteCollection();
-    }
-    
-    protected void addFailure(Exception e) {
-      log.error("Add Failure", e);
-      synchronized (failure) {
-        if (failure.get() != null) {
-          failure.get().addSuppressed(e);
-        } else {
-          failure.set(e);
-        }
-      }
-    }
-    
-    private void createCollection() {
-      try {
-        final CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName,configName,1,1)
-                .process(solrClient);
-        if (response.getStatus() != 0) {
-          addFailure(new RuntimeException("failed to create collection " + collectionName));
-        }
-      } catch (Exception e) {
-        addFailure(e);
-      }
-      
-    }
-    
-    private void deleteCollection() {
-      try {
-        final CollectionAdminRequest.Delete deleteCollectionRequest
-          = CollectionAdminRequest.deleteCollection(collectionName);
-        final CollectionAdminResponse response = deleteCollectionRequest.process(solrClient);
-        if (response.getStatus() != 0) {
-          addFailure(new RuntimeException("failed to delete collection " + collectionName));
-        }
-      } catch (Exception e) {
-        addFailure(e);
-      }
-    }
-    
-    public void joinAndClose() throws InterruptedException {
-      try {
-        super.join(60000);
-      } finally {
-        IOUtils.closeQuietly(solrClient);
-      }
-    }
-  }
-  
-  private static class CreateDeleteSearchCollectionThread extends CreateDeleteCollectionThread {
-
-    public CreateDeleteSearchCollectionThread(String name, String collectionName, String configName, long timeToRunSec,
-        SolrClient solrClient, AtomicReference<Exception> failure) {
-      super(name, collectionName, configName, timeToRunSec, solrClient, failure);
-    }
-    
-    @Override
-    protected void doWork() {
-      super.doWork();
-      searchNonExistingCollection();
-    }
-    
-    private void searchNonExistingCollection() {
-      try {
-        solrClient.query(collectionName, new SolrQuery("*"));
-      } catch (Exception e) {
-        if (!e.getMessage().contains("not found") && !e.getMessage().contains("Can not find")) {
-          addFailure(e);
-        }
-      }
-    }
-    
-  }
-  
-}

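The deleted ConcurrentDeleteAndCreateCollectionTest drove each worker thread with a time-bounded loop; its shape, sketched below, assumes a shared AtomicReference<Exception> named "failure" and a doWork() that creates and then deletes one collection, as in the removed class.

    // Run until the deadline passes or any thread has recorded a failure.
    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
    while (!timeout.hasTimedOut() && failure.get() == null) {
      doWork(); // create a collection, then delete it
    }
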
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
deleted file mode 100644
index 63a3272..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.Map;
-
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.params.ShardParams._ROUTE_;
-
-/**
- * Tests the Custom Sharding API.
- */
-public class CustomCollectionTest extends SolrCloudTestCase {
-
-  private static final int NODE_COUNT = 4;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(NODE_COUNT)
-        .addConfig("conf", configset("cloud-dynamic"))
-        .configure();
-  }
-
-  @Before
-  public void ensureClusterEmpty() throws Exception {
-    cluster.deleteAllCollections();
-  }
-
-  @Test
-  public void testCustomCollectionsAPI() throws Exception {
-
-    final String collection = "implicitcoll";
-    int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
-    int numShards = 3;
-    int maxShardsPerNode = (((numShards + 1) * replicationFactor) / NODE_COUNT) + 1;
-
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c", replicationFactor)
-        .setMaxShardsPerNode(maxShardsPerNode)
-        .process(cluster.getSolrClient());
-
-    DocCollection coll = getCollectionState(collection);
-    assertEquals("implicit", ((Map) coll.get(DOC_ROUTER)).get("name"));
-    assertNotNull(coll.getStr(REPLICATION_FACTOR));
-    assertNotNull(coll.getStr(MAX_SHARDS_PER_NODE));
-    assertNull("A shard of a Collection configured with implicit router must have null range",
-        coll.getSlice("a").getRange());
-
-    new UpdateRequest()
-        .add("id", "6")
-        .add("id", "7")
-        .add("id", "8")
-        .withRoute("a")
-        .commit(cluster.getSolrClient(), collection);
-
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-
-    cluster.getSolrClient().deleteByQuery(collection, "*:*");
-    cluster.getSolrClient().commit(collection, true, true);
-    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
-
-    new UpdateRequest()
-        .add("id", "9")
-        .add("id", "10")
-        .add("id", "11")
-        .withRoute("c")
-        .commit(cluster.getSolrClient(), collection);
-
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
-
-    //Testing CREATESHARD
-    CollectionAdminRequest.createShard(collection, "x")
-        .process(cluster.getSolrClient());
-    waitForState("Expected shard 'x' to be active", collection, (n, c) -> {
-      if (c.getSlice("x") == null)
-        return false;
-      for (Replica r : c.getSlice("x")) {
-        if (r.getState() != Replica.State.ACTIVE)
-          return false;
-      }
-      return true;
-    });
-
-    new UpdateRequest()
-        .add("id", "66", _ROUTE_, "x")
-        .commit(cluster.getSolrClient(), collection);
-    // TODO - the local state is cached and causes the request to fail with 'unknown shard'
-    // assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "x")).getResults().getNumFound());
-
-  }
-
-  @Test
-  public void testRouteFieldForImplicitRouter() throws Exception {
-
-    int numShards = 4;
-    int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
-    int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
-    String shard_fld = "shard_s";
-
-    final String collection = "withShardField";
-
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c,d", replicationFactor)
-        .setMaxShardsPerNode(maxShardsPerNode)
-        .setRouterField(shard_fld)
-        .process(cluster.getSolrClient());
-
-    new UpdateRequest()
-        .add("id", "6", shard_fld, "a")
-        .add("id", "7", shard_fld, "a")
-        .add("id", "8", shard_fld, "b")
-        .commit(cluster.getSolrClient(), collection);
-
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
-    assertEquals(2, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-
-  }
-
-  @Test
-  public void testRouteFieldForHashRouter()throws Exception{
-    String collectionName = "routeFieldColl";
-    int numShards = 4;
-    int replicationFactor = 2;
-    int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
-    String shard_fld = "shard_s";
-
-    CollectionAdminRequest.createCollection(collectionName, "conf", numShards, replicationFactor)
-        .setMaxShardsPerNode(maxShardsPerNode)
-        .setRouterField(shard_fld)
-        .process(cluster.getSolrClient());
-
-    new UpdateRequest()
-        .add("id", "6", shard_fld, "a")
-        .add("id", "7", shard_fld, "a")
-        .add("id", "8", shard_fld, "b")
-        .commit(cluster.getSolrClient(), collectionName);
-
-    assertEquals(3, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(2, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-    assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
-    assertEquals(0, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
-
-
-    cluster.getSolrClient().deleteByQuery(collectionName, "*:*");
-    cluster.getSolrClient().commit(collectionName);
-
-    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "100", shard_fld, "c!doc1"));
-    cluster.getSolrClient().commit(collectionName);
-    assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c!")).getResults().getNumFound());
-
-  }
-
-  @Test
-  public void testCreateShardRepFactor() throws Exception  {
-    final String collectionName = "testCreateShardRepFactor";
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "a,b", 1)
-        .process(cluster.getSolrClient());
-
-    CollectionAdminRequest.createShard(collectionName, "x")
-        .process(cluster.getSolrClient());
-
-    waitForState("Not enough active replicas in shard 'x'", collectionName, (n, c) -> {
-      return c.getSlice("x").getReplicas().size() == 1;
-    });
-
-  }
-
-}

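The removed CustomCollectionTest verified shard addressing by name; condensed, the implicit-router round trip looks like the sketch below, assuming the same "cluster" fixture and "conf" configset the test used.

    // Documents routed with withRoute("a") land only on shard "a", so a query
    // scoped with the _ROUTE_ param sees exactly that shard's documents.
    CollectionAdminRequest
        .createCollectionWithImplicitRouter("implicitcoll", "conf", "a,b,c", 2)
        .process(cluster.getSolrClient());
    new UpdateRequest()
        .add("id", "6")
        .withRoute("a")
        .commit(cluster.getSolrClient(), "implicitcoll");
    assertEquals(1, cluster.getSolrClient()
        .query("implicitcoll", new SolrQuery("*:*").setParam(_ROUTE_, "a"))
        .getResults().getNumFound());
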
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
index 8a46808..2775a0c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
@@ -17,8 +17,18 @@
 package org.apache.solr.cloud;
 
 import java.lang.invoke.MethodHandles;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Queue;
+import java.util.Set;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
@@ -32,6 +42,7 @@ import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.cloud.Overseer.LeaderStatus;
 import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.DocRouter;
@@ -65,7 +76,18 @@ import org.mockito.stubbing.Answer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyInt;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.isNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
 
@@ -572,7 +594,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
       }
     }
     
-    if (random().nextBoolean()) Collections.shuffle(createNodeList, OverseerCollectionMessageHandler.RANDOM);
+    if (random().nextBoolean()) Collections.shuffle(createNodeList, random());
 
     underTest = new OverseerCollectionConfigSetProcessorToBeTested(zkStateReaderMock,
         "1234", shardHandlerFactoryMock, ADMIN_PATH, workQueueMock, runningMapMock,

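The substantive change in this hunk replaces OverseerCollectionMessageHandler.RANDOM with the test framework's random(); because LuceneTestCase.random() is derived from the run's seed, the shuffle now replays deterministically when a failing seed is re-run. A small sketch of that property (the list contents are illustrative only):

    // With a fixed test seed, random() yields the same sequence on every run,
    // so the shuffled ordering is reproducible when the seed is replayed.
    List<String> createNodeList = new ArrayList<>(Arrays.asList("n1", "n2", "n3"));
    if (random().nextBoolean()) Collections.shuffle(createNodeList, random());
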
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java
index 54b66a0..9a86912 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java
@@ -22,6 +22,7 @@ import java.util.Map;
 
 import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.response.SolrResponseBase;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CommonAdminParams;
 import org.apache.solr.common.params.CommonParams;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java b/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java
deleted file mode 100644
index a3fbb32..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-// Collect useful operations for testing assigning properties to individual replicas
-// Could probably expand this to do something creative with getting random slices
-// and shards, but for now this will do.
-public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBase {
-
-  public static NamedList<Object> doPropertyAction(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
-    assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    for (int idx = 0; idx < paramsIn.length; idx += 2) {
-      params.set(paramsIn[idx], paramsIn[idx + 1]);
-    }
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    return client.request(request);
-  }
-
-  public static void verifyPropertyNotPresent(CloudSolrClient client, String collectionName, String replicaName,
-                                String property)
-      throws KeeperException, InterruptedException {
-    ClusterState clusterState = null;
-    Replica replica = null;
-    for (int idx = 0; idx < 300; ++idx) {
-      clusterState = client.getZkStateReader().getClusterState();
-      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
-      replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
-      if (replica == null) {
-        fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
-      }
-      if (StringUtils.isBlank(replica.getProperty(property))) return;
-      Thread.sleep(100);
-    }
-    fail("Property " + property + " not set correctly for collection/replica pair: " +
-        collectionName + "/" + replicaName + ". Replica props: " + replica.getProperties().toString() +
-        ". Cluster state is " + clusterState.toString());
-
-  }
-
-  // Waits for the given property on the given collection/replica pair
-  // to reach the expected value, failing after ~30 seconds.
-  public static void verifyPropertyVal(CloudSolrClient client, String collectionName,
-                         String replicaName, String property, String val)
-      throws InterruptedException, KeeperException {
-    Replica replica = null;
-    ClusterState clusterState = null;
-
-    for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
-      clusterState = client.getZkStateReader().getClusterState();
-      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
-      replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
-      if (replica == null) {
-        fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
-      }
-      if (StringUtils.equals(val, replica.getProperty(property))) return;
-      Thread.sleep(100);
-    }
-
-    fail("Property '" + property + "' with value " + replica.getProperty(property) +
-        " not set correctly for collection/replica pair: " + collectionName + "/" + replicaName + " property map is " +
-        replica.getProperties().toString() + ".");
-
-  }
-
-  // Verify that
-  // 1> the property is only set once in all the replicas in a slice.
-  // 2> the property is balanced evenly across all the nodes hosting collection
-  public static void verifyUniqueAcrossCollection(CloudSolrClient client, String collectionName,
-                                    String property) throws KeeperException, InterruptedException {
-    verifyUnique(client, collectionName, property, true);
-  }
-
-  public static void verifyUniquePropertyWithinCollection(CloudSolrClient client, String collectionName,
-                            String property) throws KeeperException, InterruptedException {
-    verifyUnique(client, collectionName, property, false);
-  }
-
-  public static void verifyUnique(CloudSolrClient client, String collectionName, String property, boolean balanced)
-      throws KeeperException, InterruptedException {
-
-    DocCollection col = null;
-    for (int idx = 0; idx < 300; ++idx) {
-      ClusterState clusterState = client.getZkStateReader().getClusterState();
-
-      col = clusterState.getCollection(collectionName);
-      if (col == null) {
-        fail("Could not find collection " + collectionName);
-      }
-      Map<String, Integer> counts = new HashMap<>();
-      Set<String> uniqueNodes = new HashSet<>();
-      boolean allSlicesHaveProp = true;
-      boolean badSlice = false;
-      for (Slice slice : col.getSlices()) {
-        boolean thisSliceHasProp = false;
-        int propCount = 0;
-        for (Replica replica : slice.getReplicas()) {
-          uniqueNodes.add(replica.getNodeName());
-          String propVal = replica.getProperty(property);
-          if (StringUtils.isNotBlank(propVal)) {
-            ++propCount;
-            if (counts.containsKey(replica.getNodeName()) == false) {
-              counts.put(replica.getNodeName(), 0);
-            }
-            int count = counts.get(replica.getNodeName());
-            thisSliceHasProp = true;
-            counts.put(replica.getNodeName(), count + 1);
-          }
-        }
-        badSlice = (propCount > 1) ? true : badSlice;
-        allSlicesHaveProp = allSlicesHaveProp ? thisSliceHasProp : allSlicesHaveProp;
-      }
-      if (balanced == false && badSlice == false) {
-        return;
-      }
-      if (allSlicesHaveProp && balanced) {
-        // Check that the properties are evenly distributed.
-        int minProps = col.getSlices().size() / uniqueNodes.size();
-        int maxProps = minProps;
-
-        if (col.getSlices().size() % uniqueNodes.size() > 0) {
-          ++maxProps;
-        }
-        boolean doSleep = false;
-        for (Map.Entry<String, Integer> ent : counts.entrySet()) {
-          if (ent.getValue() != minProps && ent.getValue() != maxProps) {
-            doSleep = true;
-          }
-        }
-
-        if (doSleep == false) {
-          assertTrue("We really shouldn't be calling this if there is no node with the property " + property,
-              counts.size() > 0);
-          return;
-        }
-      }
-      Thread.sleep(100);
-    }
-    fail("Collection " + collectionName + " does not have roles evenly distributed. Collection is: " + col.toString());
-  }
-
-}

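All of the helpers deleted from ReplicaPropertiesBase followed the same poll-until-converged pattern; condensed, and assuming client, collectionName, replicaName, property and val in scope as in the removed verifyPropertyVal:

    // Re-read cluster state every 100 ms, up to 300 tries (~30 seconds), and
    // fail if the replica property never reaches the expected value.
    for (int idx = 0; idx < 300; ++idx) {
      ClusterState clusterState = client.getZkStateReader().getClusterState();
      Replica replica = clusterState.getCollection(collectionName).getReplica(replicaName);
      if (StringUtils.equals(val, replica.getProperty(property))) return;
      Thread.sleep(100);
    }
    fail("Property " + property + " was never set to " + val);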

[19/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to it's own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
new file mode 100644
index 0000000..c411fbc
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.time.Instant;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+
+import org.apache.lucene.util.Version;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Replica.State;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.backup.BackupManager;
+import org.apache.solr.core.backup.repository.BackupRepository;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
+import org.apache.solr.core.snapshots.SolrSnapshotManager;
+import org.apache.solr.handler.component.ShardHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public BackupCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    String collectionName = message.getStr(COLLECTION_PROP);
+    String backupName = message.getStr(NAME);
+    String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
+
+    Instant startTime = Instant.now();
+
+    CoreContainer cc = ocmh.overseer.getCoreContainer();
+    BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
+    BackupManager backupMgr = new BackupManager(repository, ocmh.zkStateReader);
+
+    // Backup location
+    URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
+    URI backupPath = repository.resolve(location, backupName);
+
+    // Fail if the backup directory already exists.
+    if (repository.exists(backupPath)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The backup directory already exists: " + backupPath);
+    }
+
+    // Create a directory to store backup details.
+    repository.createDirectory(backupPath);
+
+    String strategy = message.getStr(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
+    switch (strategy) {
+      case CollectionAdminParams.COPY_FILES_STRATEGY: {
+        copyIndexFiles(backupPath, message, results);
+        break;
+      }
+      case CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY: {
+        break;
+      }
+    }
+
+    log.info("Starting to backup ZK data for backupName={}", backupName);
+
+    //Download the configs
+    String configName = ocmh.zkStateReader.readConfigName(collectionName);
+    backupMgr.downloadConfigDir(location, backupName, configName);
+
+    //Save the collection's state. It can be part of the monolithic clusterstate.json or an individual state.json.
+    //Since we don't want to distinguish between the two, we extract the state and back it up as a separate json.
+    DocCollection collectionState = ocmh.zkStateReader.getClusterState().getCollection(collectionName);
+    backupMgr.writeCollectionState(location, backupName, collectionName, collectionState);
+
+    Properties properties = new Properties();
+
+    properties.put(BackupManager.BACKUP_NAME_PROP, backupName);
+    properties.put(BackupManager.COLLECTION_NAME_PROP, collectionName);
+    properties.put(OverseerCollectionMessageHandler.COLL_CONF, configName);
+    properties.put(BackupManager.START_TIME_PROP, startTime.toString());
+    properties.put(BackupManager.INDEX_VERSION_PROP, Version.LATEST.toString());
+    //TODO: Add MD5 of the configset. If a configset with the same name exists during restore, we can compare checksums to see if they are the same.
+    //if they are not the same then we can throw an error or have an 'overwriteConfig' flag
+    //TODO save numDocs for the shardLeader. We can use it to sanity check the restore.
+
+    backupMgr.writeBackupProperties(location, backupName, properties);
+
+    log.info("Completed backing up ZK data for backupName={}", backupName);
+  }
+
+  private Replica selectReplicaWithSnapshot(CollectionSnapshotMetaData snapshotMeta, Slice slice) {
+    // The goal here is to choose the snapshot of the replica which was the leader at the time snapshot was created.
+    // If that is not possible, we choose any other replica for the given shard.
+    Collection<CoreSnapshotMetaData> snapshots = snapshotMeta.getReplicaSnapshotsForShard(slice.getName());
+
+    Optional<CoreSnapshotMetaData> leaderCore = snapshots.stream().filter(x -> x.isLeader()).findFirst();
+    if (leaderCore.isPresent()) {
+      log.info("Replica {} was the leader when snapshot {} was created.", leaderCore.get().getCoreName(), snapshotMeta.getName());
+      Replica r = slice.getReplica(leaderCore.get().getCoreName());
+      if ((r != null) && !r.getState().equals(State.DOWN)) {
+        return r;
+      }
+    }
+
+    Optional<Replica> r = slice.getReplicas().stream()
+                               .filter(x -> x.getState() != State.DOWN && snapshotMeta.isSnapshotExists(slice.getName(), x))
+                               .findFirst();
+
+    if (!r.isPresent()) {
+      throw new SolrException(ErrorCode.SERVER_ERROR,
+          "Unable to find any live replica with a snapshot named " + snapshotMeta.getName() + " for shard " + slice.getName());
+    }
+
+    return r.get();
+  }
+
+  private void copyIndexFiles(URI backupPath, ZkNodeProps request, NamedList results) throws Exception {
+    String collectionName = request.getStr(COLLECTION_PROP);
+    String backupName = request.getStr(NAME);
+    String asyncId = request.getStr(ASYNC);
+    String repoName = request.getStr(CoreAdminParams.BACKUP_REPOSITORY);
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+    Map<String, String> requestMap = new HashMap<>();
+
+    String commitName = request.getStr(CoreAdminParams.COMMIT_NAME);
+    Optional<CollectionSnapshotMetaData> snapshotMeta = Optional.empty();
+    if (commitName != null) {
+      SolrZkClient zkClient = ocmh.zkStateReader.getZkClient();
+      snapshotMeta = SolrSnapshotManager.getCollectionLevelSnapshot(zkClient, collectionName, commitName);
+      if (!snapshotMeta.isPresent()) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName
+            + " does not exist for collection " + collectionName);
+      }
+      if (snapshotMeta.get().getStatus() != SnapshotStatus.Successful) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName + " for collection " + collectionName
+            + " has not completed successfully. The status is " + snapshotMeta.get().getStatus());
+      }
+    }
+
+    log.info("Starting backup of collection={} with backupName={} at location={}", collectionName, backupName,
+        backupPath);
+
+    Collection<String> shardsToConsider = Collections.emptySet();
+    if (snapshotMeta.isPresent()) {
+      shardsToConsider = snapshotMeta.get().getShards();
+    }
+
+    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getActiveSlices()) {
+      Replica replica = null;
+
+      if (snapshotMeta.isPresent()) {
+        if (!shardsToConsider.contains(slice.getName())) {
+          log.warn("Skipping the backup for shard {} since it wasn't part of the collection {} when snapshot {} was created.",
+              slice.getName(), collectionName, snapshotMeta.get().getName());
+          continue;
+        }
+        replica = selectReplicaWithSnapshot(snapshotMeta.get(), slice);
+      } else {
+        // Note - getLeader() can return null when there is no leader for this shard.
+        replica = slice.getLeader();
+        if (replica == null) {
+          throw new SolrException(ErrorCode.SERVER_ERROR, "No 'leader' replica available for shard " + slice.getName() + " of collection " + collectionName);
+        }
+      }
+
+      String coreName = replica.getStr(CORE_NAME_PROP);
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString());
+      params.set(NAME, slice.getName());
+      params.set(CoreAdminParams.BACKUP_REPOSITORY, repoName);
+      params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString()); // note: index dir will be here then the "snapshot." + slice name
+      params.set(CORE_NAME_PROP, coreName);
+      if (snapshotMeta.isPresent()) {
+        params.set(CoreAdminParams.COMMIT_NAME, snapshotMeta.get().getName());
+      }
+
+      ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
+      log.debug("Sent backup request to core={} for backupName={}", coreName, backupName);
+    }
+    log.debug("Sent backup requests to all shard leaders for backupName={}", backupName);
+
+    ocmh.processResponses(results, shardHandler, true, "Could not backup all replicas", asyncId, requestMap);
+  }
+}

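BackupCmd above is the Overseer-side handler; from a client the same operation is normally driven through SolrJ. The following is a sketch under the assumption that the stock CollectionAdminRequest.Backup API is used with a location visible to every node (collection and path names are illustrative):

    // Triggers BACKUPCORE on each shard leader, then writes the collection
    // state, the configset and backup.properties under the backup location.
    CollectionAdminRequest.Backup backup =
        CollectionAdminRequest.backupCollection("techproducts", "nightly")
            .setLocation("/backups/solr");
    backup.process(cluster.getSolrClient());
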
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateAliasCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateAliasCmd.java
new file mode 100644
index 0000000..c54d792
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateAliasCmd.java
@@ -0,0 +1,100 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+
+public class CreateAliasCmd implements OverseerCollectionMessageHandler.Cmd {
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public CreateAliasCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results)
+      throws Exception {
+    final String aliasName = message.getStr(NAME);
+    final List<String> canonicalCollectionList = parseCollectionsParameter(message.get("collections"));
+    final String canonicalCollectionsString = StrUtils.join(canonicalCollectionList, ',');
+
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    validateAllCollectionsExistAndNoDups(canonicalCollectionList, zkStateReader);
+
+    zkStateReader.aliasesHolder.applyModificationAndExportToZk(aliases -> aliases.cloneWithCollectionAlias(aliasName, canonicalCollectionsString));
+
+    // Sleep a bit to allow ZooKeeper state propagation.
+    //
+    // THIS IS A KLUDGE.
+    //
+    // Solr's view of the cluster is eventually consistent. *Eventually* all nodes and CloudSolrClients will be aware of
+    // alias changes, but not immediately. If a newly created alias is queried, things should work right away since Solr
+    // will attempt to see if it needs to get the latest aliases when it can't otherwise resolve the name.  However
+    // modifications to an alias will take some time.
+    //
+    // We could levy this requirement on the client but they would probably always add an obligatory sleep, which is
+    // just kicking the can down the road.  Perhaps ideally at this juncture here we could somehow wait until all
+    // Solr nodes in the cluster have the latest aliases?
+    Thread.sleep(100);
+  }
+
+  private void validateAllCollectionsExistAndNoDups(List<String> collectionList, ZkStateReader zkStateReader) {
+    final String collectionStr = StrUtils.join(collectionList, ',');
+
+    if (new HashSet<>(collectionList).size() != collectionList.size()) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+          String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', since it contains duplicates", collectionStr));
+    }
+    ClusterState clusterState = zkStateReader.getClusterState();
+    Set<String> aliasNames = zkStateReader.getAliases().getCollectionAliasListMap().keySet();
+    for (String collection : collectionList) {
+      if (clusterState.getCollectionOrNull(collection) == null && !aliasNames.contains(collection)) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', '%s' is not an existing collection or alias", collectionStr, collection));
+      }
+    }
+  }
+  
+  /**
+   * The v2 API directs that the 'collections' parameter be provided as a JSON array (e.g. ["a", "b"]).  We also
+   * maintain support for the legacy format, a comma-separated list (e.g. a,b).
+   */
+  @SuppressWarnings("unchecked")
+  private List<String> parseCollectionsParameter(Object colls) {
+    if (colls == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "missing collections param");
+    if (colls instanceof List) return (List<String>) colls;
+    return StrUtils.splitSmart(colls.toString(), ",", true).stream()
+        .map(String::trim)
+        .collect(Collectors.toList());
+  }
+
+}

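CreateAliasCmd above services the CREATEALIAS collections API action. A minimal SolrJ sketch (alias and collection names are illustrative); note the eventual-consistency caveat documented in the kludge comment above:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateAliasExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:2181").build()) {
          // Every target must be an existing collection or alias, with no
          // duplicates - see validateAllCollectionsExistAndNoDups above.
          CollectionAdminRequest.createAlias("catalog", "catalog_jan,catalog_feb")
              .process(client);
        }
      }
    }

Per parseCollectionsParameter, the v2 API form of the same request may pass the targets as a JSON array (e.g. ["catalog_jan", "catalog_feb"]) instead of the comma-separated string.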
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
new file mode 100644
index 0000000..4d9c971
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
@@ -0,0 +1,531 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
+import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.cloud.overseer.ClusterStateMutator;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
+import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.cloud.ZooKeeperException;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.admin.ConfigSetsHandlerApi;
+import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.handler.component.ShardRequest;
+import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.common.util.StrUtils.formatString;
+
+public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+  private final TimeSource timeSource;
+  private final DistribStateManager stateManager;
+
+  public CreateCollectionCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+    this.stateManager = ocmh.cloudManager.getDistribStateManager();
+    this.timeSource = ocmh.cloudManager.getTimeSource();
+  }
+
+  @Override
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    final String collectionName = message.getStr(NAME);
+    final boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
+    log.info("Create collection {}", collectionName);
+    if (clusterState.hasCollection(collectionName)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "collection already exists: " + collectionName);
+    }
+
+    String configName = getConfigName(collectionName, message);
+    if (configName == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No config set found to associate with the collection.");
+    }
+
+    ocmh.validateConfigOrThrowSolrException(configName);
+    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
+
+    try {
+
+      final String async = message.getStr(ASYNC);
+
+      List<String> nodeList = new ArrayList<>();
+      List<String> shardNames = new ArrayList<>();
+      List<ReplicaPosition> replicaPositions = buildReplicaPositions(ocmh.cloudManager, clusterState, message,
+          nodeList, shardNames, sessionWrapper);
+      ZkStateReader zkStateReader = ocmh.zkStateReader;
+      boolean isLegacyCloud = Overseer.isLegacy(zkStateReader);
+
+      ocmh.createConfNode(stateManager, configName, collectionName, isLegacyCloud);
+
+      Map<String,String> collectionParams = new HashMap<>();
+      Map<String,Object> collectionProps = message.getProperties();
+      for (String propName : collectionProps.keySet()) {
+        if (propName.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
+          collectionParams.put(propName.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), (String) collectionProps.get(propName));
+        }
+      }
+      
+      createCollectionZkNode(stateManager, collectionName, collectionParams);
+      
+      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
+
+      // wait a while until we see the collection in the cluster state
+      TimeOut waitUntil = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+      boolean created = false;
+      while (! waitUntil.hasTimedOut()) {
+        waitUntil.sleep(100);
+        created = ocmh.cloudManager.getClusterStateProvider().getClusterState().hasCollection(collectionName);
+        if (created) break;
+      }
+      if (!created)
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not fully create collection: " + collectionName);
+
+      if (nodeList.isEmpty()) {
+        log.debug("Finished create command for collection: {}", collectionName);
+        return;
+      }
+
+      // For tracking async calls.
+      Map<String, String> requestMap = new HashMap<>();
+
+
+      log.debug(formatString("Creating SolrCores for new collection {0}, shardNames {1} , message : {2}",
+          collectionName, shardNames, message));
+      Map<String,ShardRequest> coresToCreate = new LinkedHashMap<>();
+      ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+      for (ReplicaPosition replicaPosition : replicaPositions) {
+        String nodeName = replicaPosition.node;
+        String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(),
+            ocmh.cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName),
+            replicaPosition.shard, replicaPosition.type, true);
+        log.debug(formatString("Creating core {0} as part of shard {1} of collection {2} on {3}"
+            , coreName, replicaPosition.shard, collectionName, nodeName));
+
+
+        String baseUrl = zkStateReader.getBaseUrlForNodeName(nodeName);
+        // In the new mode, create the replica in the cluster state prior to creating the core;
+        // otherwise the core creation fails.
+        if (!isLegacyCloud) {
+          ZkNodeProps props = new ZkNodeProps(
+              Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
+              ZkStateReader.COLLECTION_PROP, collectionName,
+              ZkStateReader.SHARD_ID_PROP, replicaPosition.shard,
+              ZkStateReader.CORE_NAME_PROP, coreName,
+              ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
+              ZkStateReader.BASE_URL_PROP, baseUrl,
+              ZkStateReader.REPLICA_TYPE, replicaPosition.type.name(),
+              CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+          Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
+        }
+
+        // Need to create new params for each request
+        ModifiableSolrParams params = new ModifiableSolrParams();
+        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
+
+        params.set(CoreAdminParams.NAME, coreName);
+        params.set(COLL_CONF, configName);
+        params.set(CoreAdminParams.COLLECTION, collectionName);
+        params.set(CoreAdminParams.SHARD, replicaPosition.shard);
+        params.set(ZkStateReader.NUM_SHARDS_PROP, shardNames.size());
+        params.set(CoreAdminParams.NEW_COLLECTION, "true");
+        params.set(CoreAdminParams.REPLICA_TYPE, replicaPosition.type.name());
+
+        if (async != null) {
+          String coreAdminAsyncId = async + Math.abs(System.nanoTime());
+          params.add(ASYNC, coreAdminAsyncId);
+          requestMap.put(nodeName, coreAdminAsyncId);
+        }
+        ocmh.addPropertyParams(message, params);
+
+        ShardRequest sreq = new ShardRequest();
+        sreq.nodeName = nodeName;
+        params.set("qt", ocmh.adminPath);
+        sreq.purpose = 1;
+        sreq.shards = new String[]{baseUrl};
+        sreq.actualShards = sreq.shards;
+        sreq.params = params;
+
+        if (isLegacyCloud) {
+          shardHandler.submit(sreq, sreq.shards[0], sreq.params);
+        } else {
+          coresToCreate.put(coreName, sreq);
+        }
+      }
+
+      if (!isLegacyCloud) {
+        // wait for all replica entries to be created
+        Map<String, Replica> replicas = ocmh.waitToSeeReplicasInState(collectionName, coresToCreate.keySet());
+        for (Map.Entry<String, ShardRequest> e : coresToCreate.entrySet()) {
+          ShardRequest sreq = e.getValue();
+          sreq.params.set(CoreAdminParams.CORE_NODE_NAME, replicas.get(e.getKey()).getName());
+          shardHandler.submit(sreq, sreq.shards[0], sreq.params);
+        }
+      }
+
+      ocmh.processResponses(results, shardHandler, false, null, async, requestMap, Collections.emptySet());
+      if(results.get("failure") != null && ((SimpleOrderedMap)results.get("failure")).size() > 0) {
+        // Let's cleanup as we hit an exception
+        // We shouldn't be passing 'results' here for the cleanup as the response would then contain 'success'
+        // element, which may be interpreted by the user as a positive ack
+        ocmh.cleanupCollection(collectionName, new NamedList());
+        log.info("Cleaned up artifacts for failed create collection for [{}]", collectionName);
+      } else {
+        log.debug("Finished create command on all shards for collection: {}", collectionName);
+
+        // Emit a warning about production use of data driven functionality
+        boolean defaultConfigSetUsed = message.getStr(COLL_CONF) == null ||
+            message.getStr(COLL_CONF).equals(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
+        if (defaultConfigSetUsed) {
+          results.add("warning", "Using _default configset. Data driven schema functionality"
+              + " is enabled by default, which is NOT RECOMMENDED for production use. To turn it off:"
+              + " curl http://{host:port}/solr/" + collectionName + "/config -d '{\"set-user-property\": {\"update.autoCreateFields\":\"false\"}}'");
+        }
+      }
+    } catch (SolrException ex) {
+      throw ex;
+    } catch (Exception ex) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, ex);
+    } finally {
+      if (sessionWrapper.get() != null) sessionWrapper.get().release();
+    }
+  }
+
+  public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
+                                                            ZkNodeProps message,
+                                                            List<String> nodeList, List<String> shardNames,
+                                                            AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
+    final String collectionName = message.getStr(NAME);
+    // look at the replication factor and see if it matches reality
+    // if it does not, find best nodes to create more cores
+    int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
+    int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas>0?0:1));
+    int numPullReplicas = message.getInt(PULL_REPLICAS, 0);
+    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
+    String policy = message.getStr(Policy.POLICY);
+    boolean usePolicyFramework = !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty() || policy != null;
+
+    Integer numSlices = message.getInt(OverseerCollectionMessageHandler.NUM_SLICES, null);
+    String router = message.getStr("router.name", DocRouter.DEFAULT_NAME);
+    if (ImplicitDocRouter.NAME.equals(router)) {
+      ClusterStateMutator.getShardNames(shardNames, message.getStr("shards", null));
+      numSlices = shardNames.size();
+    } else {
+      if (numSlices == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, OverseerCollectionMessageHandler.NUM_SLICES + " is a required param (when using CompositeId router).");
+      }
+      if (numSlices <= 0) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, OverseerCollectionMessageHandler.NUM_SLICES + " must be > 0");
+      }
+      ClusterStateMutator.getShardNames(numSlices, shardNames);
+    }
+
+    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, 1);
+    if (usePolicyFramework && message.getStr(MAX_SHARDS_PER_NODE) != null && maxShardsPerNode > 0) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "'maxShardsPerNode>0' is not supported when autoScaling policies are used");
+    }
+    if (maxShardsPerNode == -1 || usePolicyFramework) maxShardsPerNode = Integer.MAX_VALUE;
+    if (numNrtReplicas + numTlogReplicas <= 0) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
+    }
+
+    // we need to look at every node and see how many cores it serves
+    // add our new cores to existing nodes serving the least number of cores
+    // but (for now) require that each core goes on a distinct node.
+
+    List<ReplicaPosition> replicaPositions;
+    nodeList.addAll(Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM));
+    if (nodeList.isEmpty()) {
+      log.warn("It is unusual to create a collection ("+collectionName+") without cores.");
+
+      replicaPositions = new ArrayList<>();
+    } else {
+      int totalNumReplicas = numNrtReplicas + numTlogReplicas + numPullReplicas;
+      if (totalNumReplicas > nodeList.size()) {
+        log.warn("Specified number of replicas of "
+            + totalNumReplicas
+            + " on collection "
+            + collectionName
+            + " is higher than the number of Solr instances currently live or live and part of your " + OverseerCollectionMessageHandler.CREATE_NODE_SET + "("
+            + nodeList.size()
+            + "). It's unusual to run two replica of the same slice on the same Solr-instance.");
+      }
+
+      int maxShardsAllowedToCreate = maxShardsPerNode == Integer.MAX_VALUE ?
+          Integer.MAX_VALUE :
+          maxShardsPerNode * nodeList.size();
+      int requestedShardsToCreate = numSlices * totalNumReplicas;
+      if (maxShardsAllowedToCreate < requestedShardsToCreate) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName + ". Value of "
+            + MAX_SHARDS_PER_NODE + " is " + maxShardsPerNode
+            + ", and the number of nodes currently live or live and part of your "+OverseerCollectionMessageHandler.CREATE_NODE_SET+" is " + nodeList.size()
+            + ". This allows a maximum of " + maxShardsAllowedToCreate
+            + " to be created. Value of " + OverseerCollectionMessageHandler.NUM_SLICES + " is " + numSlices
+            + ", value of " + NRT_REPLICAS + " is " + numNrtReplicas
+            + ", value of " + TLOG_REPLICAS + " is " + numTlogReplicas
+            + " and value of " + PULL_REPLICAS + " is " + numPullReplicas
+            + ". This requires " + requestedShardsToCreate
+            + " shards to be created (higher than the allowed number)");
+      }
+      replicaPositions = Assign.identifyNodes(cloudManager
+          , clusterState, nodeList, collectionName, message, shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas);
+      sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
+    }
+    return replicaPositions;
+  }
+
+  String getConfigName(String coll, ZkNodeProps message) throws KeeperException, InterruptedException {
+    String configName = message.getStr(COLL_CONF);
+
+    if (configName == null) {
+      // if there is only one conf, use that
+      List<String> configNames = null;
+      try {
+        configNames = ocmh.zkStateReader.getZkClient().getChildren(ZkConfigManager.CONFIGS_ZKNODE, null, true);
+        if (configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
+          if (!CollectionAdminParams.SYSTEM_COLL.equals(coll)) {
+            copyDefaultConfigSetTo(configNames, coll);
+          }
+          return coll;
+        } else if (configNames != null && configNames.size() == 1) {
+          configName = configNames.get(0);
+          // no config set named, but there is only 1 - use it
+          log.info("Only one config set found in zk - using it:" + configName);
+        }
+      } catch (KeeperException.NoNodeException e) {
+        // no configsets uploaded to ZK yet - fall through and return the (possibly null) configName
+      }
+    }
+    return "".equals(configName)? null: configName;
+  }
+  
+  /**
+   * Copies the _default configset to the specified configset name (overwrites if pre-existing)
+   */
+  private void copyDefaultConfigSetTo(List<String> configNames, String targetConfig) {
+    ZkConfigManager configManager = new ZkConfigManager(ocmh.zkStateReader.getZkClient());
+
+    // if a configset named coll exists, delete the configset so that _default can be copied over
+    if (configNames.contains(targetConfig)) {
+      log.info("There exists a configset by the same name as the collection we're trying to create: " + targetConfig +
+          ", deleting it so that we can copy the _default configs over and create the collection.");
+      try {
+        configManager.deleteConfigDir(targetConfig);
+      } catch (Exception e) {
+        throw new SolrException(ErrorCode.INVALID_STATE, "Error while deleting configset: " + targetConfig, e);
+      }
+    } else {
+      log.info("Only _default config set found, using it.");
+    }
+    // Copy _default into targetConfig
+    try {
+      configManager.copyConfigDir(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME, targetConfig, new HashSet<>());
+    } catch (Exception e) {
+      throw new SolrException(ErrorCode.INVALID_STATE, "Error while copying _default to " + targetConfig, e);
+    }
+  }
+
+  public static void createCollectionZkNode(DistribStateManager stateManager, String collection, Map<String,String> params) {
+    log.debug("Check for collection zkNode:" + collection);
+    String collectionPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
+
+    try {
+      if (!stateManager.hasData(collectionPath)) {
+        log.debug("Creating collection in ZooKeeper:" + collection);
+
+        try {
+          Map<String,Object> collectionProps = new HashMap<>();
+
+          // TODO: if collection.configName isn't set, and there isn't already a conf in zk, just use that?
+          String defaultConfigName = System.getProperty(ZkController.COLLECTION_PARAM_PREFIX + ZkController.CONFIGNAME_PROP, collection);
+
+          if (params.size() > 0) {
+            collectionProps.putAll(params);
+            // if the config name wasn't passed in, use the default
+            if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP)) {
+              // users can create the collection node and conf link ahead of time, or this may return another option
+              getConfName(stateManager, collection, collectionPath, collectionProps);
+            }
+
+          } else if (System.getProperty("bootstrap_confdir") != null) {
+            // if we are bootstrapping a collection, default the config for
+            // a new collection to the collection we are bootstrapping
+            log.info("Setting config for collection:" + collection + " to " + defaultConfigName);
+
+            Properties sysProps = System.getProperties();
+            for (String sprop : sysProps.stringPropertyNames()) {
+              if (sprop.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
+                collectionProps.put(sprop.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), sysProps.getProperty(sprop));
+              }
+            }
+
+            // if the config name wasn't passed in, use the default
+            if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP))
+              collectionProps.put(ZkController.CONFIGNAME_PROP, defaultConfigName);
+
+          } else if (Boolean.getBoolean("bootstrap_conf")) {
+            // the conf name should be the collection name of this core
+            collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
+          } else {
+            getConfName(stateManager, collection, collectionPath, collectionProps);
+          }
+
+          collectionProps.remove(ZkStateReader.NUM_SHARDS_PROP);  // we don't put numShards in the collections properties
+
+          ZkNodeProps zkProps = new ZkNodeProps(collectionProps);
+          stateManager.makePath(collectionPath, Utils.toJSON(zkProps), CreateMode.PERSISTENT, false);
+
+        } catch (KeeperException e) {
+          // it's okay if the node already exists
+          if (e.code() != KeeperException.Code.NODEEXISTS) {
+            throw e;
+          }
+        } catch (AlreadyExistsException e) {
+          // it's okay if the node already exists
+        }
+      } else {
+        log.debug("Collection zkNode exists");
+      }
+
+    } catch (KeeperException e) {
+      // it's okay if another request beats us to creating the node
+      if (e.code() == KeeperException.Code.NODEEXISTS) {
+        return;
+      }
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
+    } catch (IOException e) {
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt(); // restore the interrupt flag rather than clearing it
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
+    }
+
+  }
+  
+  private static void getConfName(DistribStateManager stateManager, String collection, String collectionPath, Map<String,Object> collectionProps) throws IOException,
+      KeeperException, InterruptedException {
+    // check for configName
+    log.debug("Looking for collection configName");
+    if (collectionProps.containsKey("configName")) {
+      log.info("configName was passed as a param {}", collectionProps.get("configName"));
+      return;
+    }
+
+    List<String> configNames = null;
+    int retry = 1;
+    int retryLimit = 6;
+    for (; retry < retryLimit; retry++) {
+      if (stateManager.hasData(collectionPath)) {
+        VersionedData data = stateManager.getData(collectionPath);
+        ZkNodeProps cProps = ZkNodeProps.load(data.getData());
+        if (cProps.containsKey(ZkController.CONFIGNAME_PROP)) {
+          break;
+        }
+      }
+
+      try {
+        configNames = stateManager.listData(ZkConfigManager.CONFIGS_ZKNODE);
+      } catch (NoSuchElementException | NoNodeException e) {
+        // just keep trying
+      }
+
+      // check if there's a config set with the same name as the collection
+      if (configNames != null && configNames.contains(collection)) {
+        log.info(
+            "Could not find explicit collection configName, but found config name matching collection name - using that set.");
+        collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
+        break;
+      }
+      // if _default exists, use that
+      if (configNames != null && configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
+        log.info(
+            "Could not find explicit collection configName, but found _default config set - using that set.");
+        collectionProps.put(ZkController.CONFIGNAME_PROP, ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
+        break;
+      }
+      // if there is only one conf, use that
+      if (configNames != null && configNames.size() == 1) {
+        // no config set named, but there is only 1 - use it
+        log.info("Only one config set found in zk - using it:" + configNames.get(0));
+        collectionProps.put(ZkController.CONFIGNAME_PROP, configNames.get(0));
+        break;
+      }
+
+      log.info("Could not find collection configName - pausing for 3 seconds and trying again - try: " + retry);
+      Thread.sleep(3000);
+    }
+    if (retry == retryLimit) {
+      log.error("Could not find configName for collection " + collection);
+      throw new ZooKeeperException(
+          SolrException.ErrorCode.SERVER_ERROR,
+          "Could not find configName for collection " + collection + " found:" + configNames);
+    }
+  }
+}

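The capacity check in buildReplicaPositions above enforces numSlices * (NRT + TLOG + PULL replicas) <= maxShardsPerNode * liveNodes. For example, 2 shards with 2 NRT and 1 PULL replica each is 6 cores, which on a 3-node cluster needs maxShardsPerNode >= 2. A minimal SolrJ sketch of a request that satisfies it (names and sizes illustrative):

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateCollectionExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:2181").build()) {
          // 2 shards x (2 NRT + 1 PULL) = 6 cores; maxShardsPerNode=2 on
          // 3 live nodes allows exactly 6, so the check above passes.
          CollectionAdminRequest.createCollection("catalog", "_default", 2, 2)
              .setPullReplicas(1)
              .setMaxShardsPerNode(2)
              .process(client);
        }
      }
    }

As the warning emitted by the command notes, relying on the _default configset enables data-driven schema functionality, which is not recommended for production.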
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
new file mode 100644
index 0000000..311d9ef
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.cloud.CloudUtil;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.common.SolrCloseableLatch;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+public class CreateShardCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public CreateShardCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    String collectionName = message.getStr(COLLECTION_PROP);
+    String sliceName = message.getStr(SHARD_ID_PROP);
+    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
+
+    log.info("Create shard invoked: {}", message);
+    if (collectionName == null || sliceName == null)
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'collection' and 'shard' are required parameters");
+
+    DocCollection collection = clusterState.getCollection(collectionName);
+
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
+    SolrCloseableLatch countDownLatch;
+    try {
+      List<ReplicaPosition> positions = buildReplicaPositions(ocmh.cloudManager, clusterState, collectionName, message, sessionWrapper);
+      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
+      // wait for a while until we see the shard
+      ocmh.waitForNewShard(collectionName, sliceName);
+
+      String async = message.getStr(ASYNC);
+      countDownLatch = new SolrCloseableLatch(positions.size(), ocmh);
+      for (ReplicaPosition position : positions) {
+        String nodeName = position.node;
+        String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(), collection, sliceName, position.type);
+        log.info("Creating replica " + coreName + " as part of slice " + sliceName + " of collection " + collectionName
+            + " on " + nodeName);
+
+        // Need to create new params for each request
+        ZkNodeProps addReplicasProps = new ZkNodeProps(
+            COLLECTION_PROP, collectionName,
+            SHARD_ID_PROP, sliceName,
+            ZkStateReader.REPLICA_TYPE, position.type.name(),
+            CoreAdminParams.NODE, nodeName,
+            CoreAdminParams.NAME, coreName,
+            CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+        Map<String, Object> propertyParams = new HashMap<>();
+        ocmh.addPropertyParams(message, propertyParams);
+        addReplicasProps = addReplicasProps.plus(propertyParams);
+        if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
+        final NamedList addResult = new NamedList();
+        ocmh.addReplica(zkStateReader.getClusterState(), addReplicasProps, addResult, () -> {
+          countDownLatch.countDown();
+          Object addResultFailure = addResult.get("failure");
+          if (addResultFailure != null) {
+            SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
+            if (failure == null) {
+              failure = new SimpleOrderedMap();
+              results.add("failure", failure);
+            }
+            failure.addAll((NamedList) addResultFailure);
+          } else {
+            SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
+            if (success == null) {
+              success = new SimpleOrderedMap();
+              results.add("success", success);
+            }
+            success.addAll((NamedList) addResult.get("success"));
+          }
+        });
+      }
+    } finally {
+      if (sessionWrapper.get() != null) sessionWrapper.get().release();
+    }
+
+    log.debug("Waiting for create shard action to complete");
+    countDownLatch.await(5, TimeUnit.MINUTES);
+    log.debug("Finished waiting for create shard action to complete");
+
+    log.info("Finished create command on all shards for collection: " + collectionName);
+
+  }
+
+  public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
+         String collectionName, ZkNodeProps message, AtomicReference< PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
+    String sliceName = message.getStr(SHARD_ID_PROP);
+    DocCollection collection = clusterState.getCollection(collectionName);
+
+    int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, collection.getInt(NRT_REPLICAS, collection.getInt(REPLICATION_FACTOR, 1))));
+    int numPullReplicas = message.getInt(PULL_REPLICAS, collection.getInt(PULL_REPLICAS, 0));
+    int numTlogReplicas = message.getInt(TLOG_REPLICAS, collection.getInt(TLOG_REPLICAS, 0));
+    int totalReplicas = numNrtReplicas + numPullReplicas + numTlogReplicas;
+
+    if (numNrtReplicas + numTlogReplicas <= 0) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
+    }
+
+    Object createNodeSetStr = message.get(OverseerCollectionMessageHandler.CREATE_NODE_SET);
+
+    boolean usePolicyFramework = CloudUtil.usePolicyFramework(collection, cloudManager);
+    List<ReplicaPosition> positions;
+    if (usePolicyFramework) {
+      if (collection.getPolicyName() != null) message.getProperties().put(Policy.POLICY, collection.getPolicyName());
+      positions = Assign.identifyNodes(cloudManager,
+          clusterState,
+          Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM),
+          collection.getName(),
+          message,
+          Collections.singletonList(sliceName),
+          numNrtReplicas,
+          numTlogReplicas,
+          numPullReplicas);
+      sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
+    } else {
+      List<Assign.ReplicaCount> sortedNodeList = Assign.getNodesForNewReplicas(clusterState, collection.getName(), sliceName, totalReplicas,
+          createNodeSetStr, cloudManager);
+      int i = 0;
+      positions = new ArrayList<>();
+      for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
+          Replica.Type.TLOG, numTlogReplicas,
+          Replica.Type.PULL, numPullReplicas
+      ).entrySet()) {
+        for (int j = 0; j < e.getValue(); j++) {
+          positions.add(new ReplicaPosition(sliceName, j + 1, e.getKey(), sortedNodeList.get(i % sortedNodeList.size()).nodeName));
+          i++;
+        }
+      }
+    }
+    return positions;
+  }
+
+}

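CREATESHARD is only meaningful for collections using the implicit router, where shard names are user-chosen rather than derived from hash ranges. A minimal SolrJ sketch (collection and shard names illustrative):

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateShardExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:2181").build()) {
          // Replica counts default to the collection's own NRT/TLOG/PULL
          // settings, per buildReplicaPositions above.
          CollectionAdminRequest.createShard("events", "shard_2018_02")
              .process(client);
        }
      }
    }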
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateSnapshotCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateSnapshotCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateSnapshotCmd.java
new file mode 100644
index 0000000..32715d6
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateSnapshotCmd.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.Replica.State;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
+import org.apache.solr.core.snapshots.SolrSnapshotManager;
+import org.apache.solr.handler.component.ShardHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the functionality of creating a collection level snapshot.
+ */
+public class CreateSnapshotCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public CreateSnapshotCmd (OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    String collectionName =  message.getStr(COLLECTION_PROP);
+    String commitName =  message.getStr(CoreAdminParams.COMMIT_NAME);
+    String asyncId = message.getStr(ASYNC);
+    SolrZkClient zkClient = ocmh.zkStateReader.getZkClient();
+    Date creationDate = new Date();
+
+    if (SolrSnapshotManager.snapshotExists(zkClient, collectionName, commitName)) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName
+          + " already exists for collection " + collectionName);
+    }
+
+    log.info("Creating a snapshot for collection={} with commitName={}", collectionName, commitName);
+
+    // Create a node in ZK to store the collection level snapshot meta-data.
+    SolrSnapshotManager.createCollectionLevelSnapshot(zkClient, collectionName, new CollectionSnapshotMetaData(commitName));
+    log.info("Created a ZK path to store snapshot information for collection={} with commitName={}", collectionName, commitName);
+
+    Map<String, String> requestMap = new HashMap<>();
+    NamedList shardRequestResults = new NamedList();
+    Map<String, Slice> shardByCoreName = new HashMap<>();
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+
+    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
+      for (Replica replica : slice.getReplicas()) {
+        if (replica.getState() != State.ACTIVE) {
+          log.info("Replica {} is not active. Hence not sending the createsnapshot request", replica.getCoreName());
+          continue; // Since replica is not active - no point sending a request.
+        }
+
+        String coreName = replica.getStr(CORE_NAME_PROP);
+
+        ModifiableSolrParams params = new ModifiableSolrParams();
+        params.set(CoreAdminParams.ACTION, CoreAdminAction.CREATESNAPSHOT.toString());
+        params.set(NAME, slice.getName());
+        params.set(CORE_NAME_PROP, coreName);
+        params.set(CoreAdminParams.COMMIT_NAME, commitName);
+
+        ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
+        log.debug("Sent createsnapshot request to core={} with commitName={}", coreName, commitName);
+
+        shardByCoreName.put(coreName, slice);
+      }
+    }
+
+    // At this point we want to make sure that at least one replica for every shard
+    // was able to create the snapshot. If that is not the case, we fail the request,
+    // which takes care of situations where e.g. an entire shard is unavailable.
+    Set<String> failedShards = new HashSet<>();
+
+    ocmh.processResponses(shardRequestResults, shardHandler, false, null, asyncId, requestMap);
+    NamedList success = (NamedList) shardRequestResults.get("success");
+    List<CoreSnapshotMetaData> replicas = new ArrayList<>();
+    if (success != null) {
+      for ( int i = 0 ; i < success.size() ; i++) {
+        NamedList resp = (NamedList)success.getVal(i);
+
+        // Check if this core is the leader for the shard. The idea here is that during the backup
+        // operation we preferably use the snapshot of the "leader" replica since it is most likely
+        // to have latest state.
+        String coreName = (String)resp.get(CoreAdminParams.CORE);
+        Slice slice = shardByCoreName.remove(coreName);
+        boolean leader = (slice.getLeader() != null && slice.getLeader().getCoreName().equals(coreName));
+        resp.add(SolrSnapshotManager.SHARD_ID, slice.getName());
+        resp.add(SolrSnapshotManager.LEADER, leader);
+
+        CoreSnapshotMetaData c = new CoreSnapshotMetaData(resp);
+        replicas.add(c);
+        log.info("Snapshot with commitName {} is created successfully for core {}", commitName, c.getCoreName());
+      }
+    }
+
+    if (!shardByCoreName.isEmpty()) { // One or more failures.
+      log.warn("Unable to create a snapshot with name {} for following cores {}", commitName, shardByCoreName.keySet());
+
+      // Count number of failures per shard.
+      Map<String, Integer> failuresByShardId = new HashMap<>();
+      for (Map.Entry<String,Slice> entry : shardByCoreName.entrySet()) {
+        String shardId = entry.getValue().getName();
+        int f = failuresByShardId.getOrDefault(shardId, 0);
+        failuresByShardId.put(shardId, f + 1);
+      }
+
+      // Now that we know the number of failures per shard, we can figure out
+      // whether at least one replica per shard was able to create a snapshot.
+      DocCollection collectionStatus = ocmh.zkStateReader.getClusterState().getCollection(collectionName);
+      for (Map.Entry<String,Integer> entry : failuresByShardId.entrySet()) {
+        int replicaCount = collectionStatus.getSlice(entry.getKey()).getReplicas().size();
+        if (replicaCount <= entry.getValue()) {
+          failedShards.add(entry.getKey());
+        }
+      }
+    }
+
+    if (failedShards.isEmpty()) { // No failures.
+      CollectionSnapshotMetaData meta = new CollectionSnapshotMetaData(commitName, SnapshotStatus.Successful, creationDate, replicas);
+      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, meta);
+      log.info("Saved following snapshot information for collection={} with commitName={} in Zookeeper : {}", collectionName,
+          commitName, meta.toNamedList());
+    } else {
+      log.warn("Failed to create a snapshot for collection {} with commitName = {}. Snapshot could not be captured for following shards {}",
+          collectionName, commitName, failedShards);
+      // Update the ZK meta-data to include only cores with the snapshot. This will enable users to figure out
+      // which cores have the named snapshot.
+      CollectionSnapshotMetaData meta = new CollectionSnapshotMetaData(commitName, SnapshotStatus.Failed, creationDate, replicas);
+      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, meta);
+      log.info("Saved following snapshot information for collection={} with commitName={} in Zookeeper : {}", collectionName,
+          commitName, meta.toNamedList());
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to create snapshot on shards " + failedShards);
+    }
+  }
+}

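A collection-level snapshot is requested with the CREATESNAPSHOT action; the command above then records per-core snapshot metadata in ZooKeeper and fails only if some shard ends up with no replica holding a successful snapshot. A minimal SolrJ sketch (collection and snapshot names illustrative):

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateSnapshotExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:2181").build()) {
          // Snapshot names must be unique per collection - see the
          // snapshotExists() check at the top of CreateSnapshotCmd.call().
          CollectionAdminRequest.createSnapshot("catalog", "beforeReindex")
              .process(client);
        }
      }
    }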
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteAliasCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteAliasCmd.java
new file mode 100644
index 0000000..e199d7d
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteAliasCmd.java
@@ -0,0 +1,43 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.NamedList;
+
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+public class DeleteAliasCmd implements OverseerCollectionMessageHandler.Cmd {
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public DeleteAliasCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    String aliasName = message.getStr(NAME);
+
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    zkStateReader.aliasesHolder.applyModificationAndExportToZk(a -> a.cloneWithCollectionAlias(aliasName, null));
+  }
+
+}

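Deleting an alias goes through the same ZK-backed aliases holder as creation, passing null as the target to remove the mapping. A minimal SolrJ sketch (alias name illustrative):

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class DeleteAliasExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:2181").build()) {
          // Maps to cloneWithCollectionAlias(aliasName, null) in DeleteAliasCmd.
          CollectionAdminRequest.deleteAlias("catalog").process(client);
        }
      }
    }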
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
new file mode 100644
index 0000000..bdae8b9
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
@@ -0,0 +1,142 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.common.NonExistentCoreException;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.Aliases;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.core.snapshots.SolrSnapshotManager;
+import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+public class DeleteCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+  private final TimeSource timeSource;
+
+  public DeleteCollectionCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+    this.timeSource = ocmh.cloudManager.getTimeSource();
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    Aliases aliases = zkStateReader.getAliases();
+    final String collection = message.getStr(NAME);
+    for (Map.Entry<String, List<String>> ent :  aliases.getCollectionAliasListMap().entrySet()) {
+      if (ent.getValue().contains(collection)) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+            "Collection : " + collection + " is part of alias " + ent.getKey() + " remove or modify the alias before removing this collection.");
+      }
+    }
+
+    try {
+      // Remove the snapshots meta-data for this collection in ZK. Deleting actual index files
+      // should be taken care of as part of collection delete operation.
+      SolrZkClient zkClient = zkStateReader.getZkClient();
+      SolrSnapshotManager.cleanupCollectionLevelSnapshots(zkClient, collection);
+
+      if (zkStateReader.getClusterState().getCollectionOrNull(collection) == null) {
+        if (zkStateReader.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, true)) {
+          // if the collection is not in the clusterstate but is listed in zk, do nothing: it
+          // will just be removed in the finally block. We cannot continue, because the code
+          // below will error if the collection is not in the clusterstate.
+          return;
+        }
+      }
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.UNLOAD.toString());
+      params.set(CoreAdminParams.DELETE_INSTANCE_DIR, true);
+      params.set(CoreAdminParams.DELETE_DATA_DIR, true);
+
+      String asyncId = message.getStr(ASYNC);
+      Map<String, String> requestMap = null;
+      if (asyncId != null) {
+        requestMap = new HashMap<>();
+      }
+
+      Set<String> okayExceptions = new HashSet<>(1);
+      okayExceptions.add(NonExistentCoreException.class.getName());
+
+      ocmh.collectionCmd(message, params, results, null, asyncId, requestMap, okayExceptions);
+
+      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETE.toLower(), NAME, collection);
+      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
+
+      // wait for a while until we don't see the collection
+      TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+      boolean removed = false;
+      while (! timeout.hasTimedOut()) {
+        timeout.sleep(100);
+        removed = !zkStateReader.getClusterState().hasCollection(collection);
+        if (removed) {
+          timeout.sleep(500); // wait just a bit longer so other readers are
+          // more likely to see the removal on return
+          break;
+        }
+      }
+      if (!removed) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+            "Could not fully remove collection: " + collection);
+      }
+
+    } finally {
+
+      try {
+        if (zkStateReader.getZkClient().exists(
+            ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, true)) {
+          zkStateReader.getZkClient().clean(
+              ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection);
+        }
+      } catch (InterruptedException e) {
+        SolrException.log(log, "Cleaning up collection in zk was interrupted:"
+            + collection, e);
+        Thread.currentThread().interrupt();
+      } catch (KeeperException e) {
+        SolrException.log(log, "Problem cleaning up collection in zk:"
+            + collection, e);
+      }
+    }
+  }
+}
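
DeleteCollectionCmd is likewise driven by the Collections API. A minimal SolrJ
sketch (collection name and node URL are illustrative); as the code above shows,
the command refuses to delete a collection that is still referenced by an alias:

    import java.io.IOException;

    import org.apache.solr.client.solrj.SolrServerException;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class DeleteCollectionExample {
      public static void main(String[] args) throws SolrServerException, IOException {
        try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          // Sends a DELETE collection action; the overseer routes it to
          // DeleteCollectionCmd.call(...), which unloads all cores and then
          // cleans up the collection's znodes.
          CollectionAdminRequest.deleteCollection("my_collection").process(client);
        }
      }
    }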

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java
new file mode 100644
index 0000000..ab4dc0c
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.NamedList;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+public class DeleteNodeCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public DeleteNodeCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    ocmh.checkRequired(message, "node");
+    String node = message.getStr("node");
+    if (!state.liveNodesContain(node)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + node + " is not live");
+    }
+    List<ZkNodeProps> sourceReplicas = ReplaceNodeCmd.getReplicasOfNode(node, state);
+    List<String> singleReplicas = verifyReplicaAvailability(sourceReplicas, state);
+    if (!singleReplicas.isEmpty()) {
+      results.add("failure", "Can't delete the only existing non-PULL replica(s) on node " + node + ": " + singleReplicas.toString());
+    } else {
+      cleanupReplicas(results, state, sourceReplicas, ocmh, node, message.getStr(ASYNC));
+    }
+  }
+
+  // collect names of replicas that cannot be deleted
+  static List<String> verifyReplicaAvailability(List<ZkNodeProps> sourceReplicas, ClusterState state) {
+    List<String> res = new ArrayList<>();
+    for (ZkNodeProps sourceReplica : sourceReplicas) {
+      String coll = sourceReplica.getStr(COLLECTION_PROP);
+      String shard = sourceReplica.getStr(SHARD_ID_PROP);
+      String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
+      DocCollection collection = state.getCollection(coll);
+      Slice slice = collection.getSlice(shard);
+      if (slice.getReplicas().size() < 2) {
+        // can't delete the only replica in existence
+        res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
+      } else { // check replica types
+        int otherNonPullReplicas = 0;
+        for (Replica r : slice.getReplicas()) {
+          if (!r.getName().equals(replicaName) && !r.getType().equals(Replica.Type.PULL)) {
+            otherNonPullReplicas++;
+          }
+        }
+        // can't delete - there are no other non-pull replicas
+        if (otherNonPullReplicas == 0) {
+          res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
+        }
+      }
+    }
+    return res;
+  }
+
+  static void cleanupReplicas(NamedList results,
+                              ClusterState clusterState,
+                              List<ZkNodeProps> sourceReplicas,
+                              OverseerCollectionMessageHandler ocmh,
+                              String node,
+                              String async) throws InterruptedException {
+    CountDownLatch cleanupLatch = new CountDownLatch(sourceReplicas.size());
+    for (ZkNodeProps sourceReplica : sourceReplicas) {
+      String coll = sourceReplica.getStr(COLLECTION_PROP);
+      String shard = sourceReplica.getStr(SHARD_ID_PROP);
+      String type = sourceReplica.getStr(ZkStateReader.REPLICA_TYPE);
+      log.info("Deleting replica type={} for collection={} shard={} on node={}", type, coll, shard, node);
+      NamedList deleteResult = new NamedList();
+      try {
+        if (async != null) sourceReplica = sourceReplica.plus(ASYNC, async);
+        ((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, sourceReplica.plus("parallel", "true"), deleteResult, () -> {
+          cleanupLatch.countDown();
+          if (deleteResult.get("failure") != null) {
+            synchronized (results) {
+
+              results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
+                  " on node=%s", coll, shard, node));
+            }
+          }
+        });
+      } catch (KeeperException e) {
+        log.warn("Error deleting ", e);
+        cleanupLatch.countDown();
+      } catch (Exception e) {
+        log.warn("Error deleting ", e);
+        cleanupLatch.countDown();
+        throw e;
+      }
+    }
+    log.debug("Waiting for delete node action to complete");
+    cleanupLatch.await(5, TimeUnit.MINUTES);
+  }
+
+
+}
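
DeleteNodeCmd can be exercised from SolrJ as well. A hedged sketch (the node name
is illustrative; node names use the host:port_context form found under live_nodes,
and per the code above the command rejects nodes hosting the only non-PULL replica
of any shard):

    import java.io.IOException;

    import org.apache.solr.client.solrj.SolrServerException;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class DeleteNodeExample {
      public static void main(String[] args) throws SolrServerException, IOException {
        try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          // Deletes all replicas hosted on the (still live) target node.
          CollectionAdminRequest.deleteNode("127.0.0.1:8984_solr").process(client);
        }
      }
    }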


[35/41] lucene-solr:jira/solr-11702: LUCENE-8124: Add missing file.

Posted by da...@apache.org.
LUCENE-8124: Add missing file.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f72a5dbd
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f72a5dbd
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f72a5dbd

Branch: refs/heads/jira/solr-11702
Commit: f72a5dbdc81605084e38f742dbf3567b922733b2
Parents: f5e2267
Author: Adrien Grand <jp...@gmail.com>
Authored: Mon Jan 22 08:48:59 2018 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Mon Jan 22 08:48:59 2018 +0100

----------------------------------------------------------------------
 .../compound/hyphenation-LUCENE-8124.xml        | 61 ++++++++++++++++++++
 1 file changed, 61 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f72a5dbd/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/hyphenation-LUCENE-8124.xml
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/hyphenation-LUCENE-8124.xml b/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/hyphenation-LUCENE-8124.xml
new file mode 100644
index 0000000..8710eab
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/hyphenation-LUCENE-8124.xml
@@ -0,0 +1,61 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE hyphenation-info SYSTEM "hyphenation.dtd">
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!--
+This file contains the hyphenation patterns for Danish.
+Adapted from dkhyph.tex, dkcommon.tex and dkspecial.tex
+originally created by Frank Jensen (fj@iesd.auc.dk).
+FOP adaptation by Carlos Villegas (cav@uniscope.co.jp)
+-->
+<hyphenation-info>
+
+<hyphen-char value="-"/>
+<hyphen-min before="2" after="2"/>
+
+<classes>
+aA
+bB
+cC
+dD
+eE
+fF
+gG
+hH
+iI
+jJ
+kK
+lL
+mM
+nN
+oO
+pP
+qQ
+rR
+sS
+tT
+uU
+vV
+wW
+xX
+yY
+zZ
+æÆ
+øØ
+åÅ
+</classes>
+<patterns>
+d7f
+</patterns>
+</hyphenation-info>


[14/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to its own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
deleted file mode 100644
index bb99799..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
+++ /dev/null
@@ -1,1015 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CollectionStateWatcher;
-import org.apache.solr.common.cloud.CompositeIdRouter;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.HashBasedRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.TestInjection;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-
-@Slow
-public class ShardSplitTest extends BasicDistributedZkTest {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String SHARD1_0 = SHARD1 + "_0";
-  public static final String SHARD1_1 = SHARD1 + "_1";
-
-  public ShardSplitTest() {
-    schemaString = "schema15.xml";      // we need a string id
-  }
-
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    useFactory(null);
-  }
-
-  @Test
-  public void test() throws Exception {
-
-    waitForThingsToLevelOut(15);
-
-    if (usually()) {
-      log.info("Using legacyCloud=false for cluster");
-      CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
-          .process(cloudClient);
-    }
-    incompleteOrOverlappingCustomRangeTest();
-    splitByUniqueKeyTest();
-    splitByRouteFieldTest();
-    splitByRouteKeyTest();
-
-    // todo can't call waitForThingsToLevelOut because it looks for jettys of all shards
-    // and the new sub-shards don't have any.
-    waitForRecoveriesToFinish(true);
-    //waitForThingsToLevelOut(15);
-  }
-
-  /*
-  Creates a collection with replicationFactor=1, splits a shard. Restarts the sub-shard leader node.
-  Add a replica. Ensure count matches in leader and replica.
-   */
-  public void testSplitStaticIndexReplication() throws Exception {
-    waitForThingsToLevelOut(15);
-
-    DocCollection defCol = cloudClient.getZkStateReader().getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-    Replica replica = defCol.getReplicas().get(0);
-    String nodeName = replica.getNodeName();
-
-    String collectionName = "testSplitStaticIndexReplication";
-    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
-    create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
-    create.setCreateNodeSet(nodeName); // we want to create the leader on a fixed node so that we know which one to restart later
-    create.process(cloudClient);
-    try (CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress(), true, cloudClient.getLbClient().getHttpClient())) {
-      client.setDefaultCollection(collectionName);
-      StoppableIndexingThread thread = new StoppableIndexingThread(controlClient, client, "i1", true);
-      try {
-        thread.start();
-        Thread.sleep(1000); // give the indexer some time to do its work
-        thread.safeStop();
-        thread.join();
-        client.commit();
-        controlClient.commit();
-
-        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(collectionName);
-        splitShard.setShardName(SHARD1);
-        String asyncId = splitShard.processAsync(client);
-        RequestStatusState state = CollectionAdminRequest.requestStatus(asyncId).waitFor(client, 120);
-        if (state == RequestStatusState.COMPLETED)  {
-          waitForRecoveriesToFinish(collectionName, true);
-          // let's wait to see parent shard become inactive
-          CountDownLatch latch = new CountDownLatch(1);
-          client.getZkStateReader().registerCollectionStateWatcher(collectionName, new CollectionStateWatcher() {
-            @Override
-            public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-              Slice parent = collectionState.getSlice(SHARD1);
-              Slice slice10 = collectionState.getSlice(SHARD1_0);
-              Slice slice11 = collectionState.getSlice(SHARD1_1);
-              if (slice10 != null && slice11 != null &&
-                  parent.getState() == Slice.State.INACTIVE &&
-                  slice10.getState() == Slice.State.ACTIVE &&
-                  slice11.getState() == Slice.State.ACTIVE) {
-                latch.countDown();
-                return true; // removes the watch
-              }
-              return false;
-            }
-          });
-          latch.await(1, TimeUnit.MINUTES);
-          if (latch.getCount() != 0)  {
-            // sanity check
-            fail("Sub-shards did not become active even after waiting for 1 minute");
-          }
-
-          int liveNodeCount = client.getZkStateReader().getClusterState().getLiveNodes().size();
-
-          // restart the sub-shard leader node
-          boolean restarted = false;
-          for (JettySolrRunner jetty : jettys) {
-            int port = jetty.getBaseUrl().getPort();
-            if (replica.getStr(BASE_URL_PROP).contains(":" + port))  {
-              ChaosMonkey.kill(jetty);
-              ChaosMonkey.start(jetty);
-              restarted = true;
-              break;
-            }
-          }
-          if (!restarted) {
-            // sanity check
-            fail("We could not find a jetty to kill for replica: " + replica.getCoreUrl());
-          }
-
-          // add a new replica for the sub-shard
-          CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(collectionName, SHARD1_0);
-          // use control client because less chances of it being the node being restarted
-          // this is to avoid flakiness of test because of NoHttpResponseExceptions
-          String control_collection = client.getZkStateReader().getClusterState().getCollection("control_collection").getReplicas().get(0).getStr(BASE_URL_PROP);
-          try (HttpSolrClient control = new HttpSolrClient.Builder(control_collection).withHttpClient(client.getLbClient().getHttpClient()).build())  {
-            state = addReplica.processAndWait(control, 30);
-          }
-          if (state == RequestStatusState.COMPLETED)  {
-            CountDownLatch newReplicaLatch = new CountDownLatch(1);
-            client.getZkStateReader().registerCollectionStateWatcher(collectionName, new CollectionStateWatcher() {
-              @Override
-              public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-                if (liveNodes.size() != liveNodeCount)  {
-                  return false;
-                }
-                Slice slice = collectionState.getSlice(SHARD1_0);
-                if (slice.getReplicas().size() == 2)  {
-                  if (!slice.getReplicas().stream().anyMatch(r -> r.getState() == Replica.State.RECOVERING)) {
-                    // we see replicas and none of them are recovering
-                    newReplicaLatch.countDown();
-                    return true;
-                  }
-                }
-                return false;
-              }
-            });
-            newReplicaLatch.await(30, TimeUnit.SECONDS);
-            // check consistency of the sub-shard replicas explicitly because the checkShardConsistency
-            // method doesn't handle new shards/replicas so well.
-            ClusterState clusterState = client.getZkStateReader().getClusterState();
-            DocCollection collection = clusterState.getCollection(collectionName);
-            int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
-            assertEquals("We should have checked consistency for exactly 2 replicas of shard1_0", 2, numReplicasChecked);
-          } else  {
-            fail("Adding a replica to sub-shard did not complete even after waiting for 30 seconds!. Saw state = " + state.getKey());
-          }
-        } else {
-          fail("We expected shard split to succeed on a static index but it didn't. Found state = " + state.getKey());
-        }
-      } finally {
-        thread.safeStop();
-        thread.join();
-      }
-    }
-  }
-
-  private int assertConsistentReplicas(Slice shard) throws SolrServerException, IOException {
-    long numFound = Long.MIN_VALUE;
-    int count = 0;
-    for (Replica replica : shard.getReplicas()) {
-      HttpSolrClient client = new HttpSolrClient.Builder(replica.getCoreUrl())
-          .withHttpClient(cloudClient.getLbClient().getHttpClient()).build();
-      QueryResponse response = client.query(new SolrQuery("q", "*:*", "distrib", "false"));
-      log.info("Found numFound={} on replica: {}", response.getResults().getNumFound(), replica.getCoreUrl());
-      if (numFound == Long.MIN_VALUE)  {
-        numFound = response.getResults().getNumFound();
-      } else  {
-        assertEquals("Shard " + shard.getName() + " replicas do not have same number of documents", numFound, response.getResults().getNumFound());
-      }
-      count++;
-    }
-    return count;
-  }
-
-  /**
-   * Used to test that we can split a shard when a previous split event
-   * left sub-shards in construction or recovery state.
-   *
-   * See SOLR-9439
-   */
-  @Test
-  public void testSplitAfterFailedSplit() throws Exception {
-    waitForThingsToLevelOut(15);
-
-    TestInjection.splitFailureBeforeReplicaCreation = "true:100"; // we definitely want split to fail
-    try {
-      try {
-        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-        splitShard.setShardName(SHARD1);
-        splitShard.process(cloudClient);
-        fail("Shard split was not supposed to succeed after failure injection!");
-      } catch (Exception e) {
-        // expected
-      }
-
-      // assert that sub-shards cores exist and sub-shard is in construction state
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-      zkStateReader.forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-      ClusterState state = zkStateReader.getClusterState();
-      DocCollection collection = state.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-
-      Slice shard10 = collection.getSlice(SHARD1_0);
-      assertEquals(Slice.State.CONSTRUCTION, shard10.getState());
-      assertEquals(1, shard10.getReplicas().size());
-
-      Slice shard11 = collection.getSlice(SHARD1_1);
-      assertEquals(Slice.State.CONSTRUCTION, shard11.getState());
-      assertEquals(1, shard11.getReplicas().size());
-
-      // lets retry the split
-      TestInjection.reset(); // let the split succeed
-      try {
-        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-        splitShard.setShardName(SHARD1);
-        splitShard.process(cloudClient);
-        // Yay!
-      } catch (Exception e) {
-        log.error("Shard split failed", e);
-        fail("Shard split did not succeed after a previous failed split attempt left sub-shards in construction state");
-      }
-
-    } finally {
-      TestInjection.reset();
-    }
-  }
-
-  @Test
-  public void testSplitWithChaosMonkey() throws Exception {
-    waitForThingsToLevelOut(15);
-
-    List<StoppableIndexingThread> indexers = new ArrayList<>();
-    try {
-      for (int i = 0; i < 1; i++) {
-        StoppableIndexingThread thread = new StoppableIndexingThread(controlClient, cloudClient, String.valueOf(i), true);
-        indexers.add(thread);
-        thread.start();
-      }
-      Thread.sleep(1000); // give the indexers some time to do their work
-    } catch (Exception e) {
-      log.error("Error in test", e);
-    } finally {
-      for (StoppableIndexingThread indexer : indexers) {
-        indexer.safeStop();
-        indexer.join();
-      }
-    }
-
-    cloudClient.commit();
-    controlClient.commit();
-
-    AtomicBoolean stop = new AtomicBoolean();
-    AtomicBoolean killed = new AtomicBoolean(false);
-    Runnable monkey = new Runnable() {
-      @Override
-      public void run() {
-        ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-        zkStateReader.registerCollectionStateWatcher(AbstractDistribZkTestBase.DEFAULT_COLLECTION, new CollectionStateWatcher() {
-          @Override
-          public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-            if (stop.get()) {
-              return true; // abort and remove the watch
-            }
-            Slice slice = collectionState.getSlice(SHARD1_0);
-            if (slice != null && slice.getReplicas().size() > 1) {
-              // ensure that only one watcher invocation thread can kill!
-              if (killed.compareAndSet(false, true))  {
-                log.info("Monkey thread found 2 replicas for {} {}", AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
-                CloudJettyRunner cjetty = shardToLeaderJetty.get(SHARD1);
-                try {
-                  Thread.sleep(1000 + random().nextInt(500));
-                  ChaosMonkey.kill(cjetty);
-                  stop.set(true);
-                  return true;
-                } catch (Exception e) {
-                  log.error("Monkey unable to kill jetty at port " + cjetty.jetty.getLocalPort(), e);
-                }
-              }
-            }
-            log.info("Monkey thread found only one replica for {} {}", AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
-            return false;
-          }
-        });
-      }
-    };
-
-    Thread monkeyThread = null;
-    monkeyThread = new Thread(monkey);
-    monkeyThread.start();
-    try {
-      CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-      splitShard.setShardName(SHARD1);
-      String asyncId = splitShard.processAsync(cloudClient);
-      RequestStatusState splitStatus = null;
-      try {
-        splitStatus = CollectionAdminRequest.requestStatus(asyncId).waitFor(cloudClient, 120);
-      } catch (Exception e) {
-        log.warn("Failed to get request status, maybe because the overseer node was shutdown by monkey", e);
-      }
-
-      // we don't care if the split failed, because we are injecting faults and it is likely
-      // that the split has failed. In any case we want to assert that all docs that got
-      // indexed are available in SolrCloud, and if the split succeeded then all replicas of
-      // the sub-shard must be consistent (i.e. have the same numdocs)
-      log.info("Shard split request state is COMPLETED");
-      stop.set(true);
-      monkeyThread.join();
-      Set<String> addFails = new HashSet<>();
-      Set<String> deleteFails = new HashSet<>();
-      for (StoppableIndexingThread indexer : indexers) {
-        addFails.addAll(indexer.getAddFails());
-        deleteFails.addAll(indexer.getDeleteFails());
-      }
-
-      CloudJettyRunner cjetty = shardToLeaderJetty.get(SHARD1);
-      log.info("Starting shard1 leader jetty at port {}", cjetty.jetty.getLocalPort());
-      ChaosMonkey.start(cjetty.jetty);
-      cloudClient.getZkStateReader().forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-      log.info("Current collection state: {}", printClusterStateInfo(AbstractDistribZkTestBase.DEFAULT_COLLECTION));
-
-      boolean replicaCreationsFailed = false;
-      if (splitStatus == RequestStatusState.FAILED)  {
-        // either one or more replica creations failed (because a replica may have been created on the
-        // same parent shard leader node), or the split failed while trying to soft-commit *after* all
-        // replicas had been created. The latter counts as a successful switch even if the API doesn't
-        // say so, so we must find a way to distinguish between the two. An easy way to do that is to
-        // look at the sub-shard replicas and check whether each replica core actually exists
-        // instead of existing solely inside the cluster state
-        DocCollection collectionState = cloudClient.getZkStateReader().getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-        Slice slice10 = collectionState.getSlice(SHARD1_0);
-        Slice slice11 = collectionState.getSlice(SHARD1_1);
-        if (slice10 != null && slice11 != null) {
-          for (Replica replica : slice10) {
-            if (!doesReplicaCoreExist(replica)) {
-              replicaCreationsFailed = true;
-              break;
-            }
-          }
-          for (Replica replica : slice11) {
-            if (!doesReplicaCoreExist(replica)) {
-              replicaCreationsFailed = true;
-              break;
-            }
-          }
-        }
-      }
-
-      // true if sub-shard states switch to 'active' eventually
-      AtomicBoolean areSubShardsActive = new AtomicBoolean(false);
-
-      if (!replicaCreationsFailed)  {
-        // all sub-shard replicas were created successfully so all cores must recover eventually
-        waitForRecoveriesToFinish(AbstractDistribZkTestBase.DEFAULT_COLLECTION, true);
-        // let's wait for the overseer to switch shard states
-        CountDownLatch latch = new CountDownLatch(1);
-        cloudClient.getZkStateReader().registerCollectionStateWatcher(AbstractDistribZkTestBase.DEFAULT_COLLECTION, new CollectionStateWatcher() {
-          @Override
-          public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-            Slice parent = collectionState.getSlice(SHARD1);
-            Slice slice10 = collectionState.getSlice(SHARD1_0);
-            Slice slice11 = collectionState.getSlice(SHARD1_1);
-            if (slice10 != null && slice11 != null &&
-                parent.getState() == Slice.State.INACTIVE &&
-                slice10.getState() == Slice.State.ACTIVE &&
-                slice11.getState() == Slice.State.ACTIVE) {
-              areSubShardsActive.set(true);
-              latch.countDown();
-              return true; // removes the watch
-            } else if (slice10 != null && slice11 != null &&
-                parent.getState() == Slice.State.ACTIVE &&
-                slice10.getState() == Slice.State.RECOVERY_FAILED &&
-                slice11.getState() == Slice.State.RECOVERY_FAILED) {
-              areSubShardsActive.set(false);
-              latch.countDown();
-              return true;
-            }
-            return false;
-          }
-        });
-
-        latch.await(2, TimeUnit.MINUTES);
-
-        if (latch.getCount() != 0)  {
-          // sanity check
-          fail("We think that split was successful but sub-shard states were not updated even after 2 minutes.");
-        }
-      }
-
-      cloudClient.commit(); // for visibility of results on sub-shards
-
-      checkShardConsistency(true, true, addFails, deleteFails);
-      long ctrlDocs = controlClient.query(new SolrQuery("*:*")).getResults().getNumFound();
-      // ensure we have added more than 0 docs
-      long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
-      assertTrue("Found " + ctrlDocs + " control docs", cloudClientDocs > 0);
-      assertEquals("Found " + ctrlDocs + " control docs and " + cloudClientDocs + " cloud docs", ctrlDocs, cloudClientDocs);
-
-      // check consistency of the sub-shard replicas explicitly because the checkShardConsistency
-      // method doesn't handle new shards/replicas so well.
-      if (areSubShardsActive.get()) {
-        ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-        DocCollection collection = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-        int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
-        assertEquals("We should have checked consistency for exactly 2 replicas of shard1_0", 2, numReplicasChecked);
-        numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_1));
-        assertEquals("We should have checked consistency for exactly 2 replicas of shard1_1", 2, numReplicasChecked);
-      }
-    } finally {
-      stop.set(true);
-      monkeyThread.join();
-    }
-  }
-
-  private boolean doesReplicaCoreExist(Replica replica) throws IOException {
-    try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getStr(BASE_URL_PROP))
-        .withHttpClient(cloudClient.getLbClient().getHttpClient()).build())  {
-      String coreName = replica.getCoreName();
-      try {
-        CoreAdminResponse status = CoreAdminRequest.getStatus(coreName, client);
-        if (status.getCoreStatus(coreName) == null || status.getCoreStatus(coreName).size() == 0) {
-          return false;
-        }
-      } catch (Exception e) {
-        log.warn("Error gettting core status of replica " + replica + ". Perhaps it does not exist!", e);
-        return false;
-      }
-    }
-    return true;
-  }
-
-  @Test
-  public void testSplitShardWithRule() throws Exception {
-    waitForThingsToLevelOut(15);
-
-    if (usually()) {
-      log.info("Using legacyCloud=false for cluster");
-      CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
-          .process(cloudClient);
-    }
-
-    log.info("Starting testSplitShardWithRule");
-    String collectionName = "shardSplitWithRule";
-    CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2)
-        .setRule("shard:*,replica:<2,node:*");
-    CollectionAdminResponse response = createRequest.process(cloudClient);
-    assertEquals(0, response.getStatus());
-
-    CollectionAdminRequest.SplitShard splitShardRequest = CollectionAdminRequest.splitShard(collectionName)
-        .setShardName("shard1");
-    response = splitShardRequest.process(cloudClient);
-    assertEquals(String.valueOf(response.getErrorMessages()), 0, response.getStatus());
-  }
-
-  private void incompleteOrOverlappingCustomRangeTest() throws Exception  {
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-    final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
-    Slice shard1 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(SHARD1);
-    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
-
-    List<DocRouter.Range> subRanges = new ArrayList<>();
-    List<DocRouter.Range> ranges = router.partitionRange(4, shard1Range);
-
-    // test with only one range
-    subRanges.add(ranges.get(0));
-    try {
-      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
-      fail("Shard splitting with just one custom hash range should not succeed");
-    } catch (HttpSolrClient.RemoteSolrException e) {
-      log.info("Expected exception:", e);
-    }
-    subRanges.clear();
-
-    // test with ranges with a hole in between them
-    subRanges.add(ranges.get(3)); // order shouldn't matter
-    subRanges.add(ranges.get(0));
-    try {
-      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
-      fail("Shard splitting with missing hashes in between given ranges should not succeed");
-    } catch (HttpSolrClient.RemoteSolrException e) {
-      log.info("Expected exception:", e);
-    }
-    subRanges.clear();
-
-    // test with overlapping ranges
-    subRanges.add(ranges.get(0));
-    subRanges.add(ranges.get(1));
-    subRanges.add(ranges.get(2));
-    subRanges.add(new DocRouter.Range(ranges.get(3).min - 15, ranges.get(3).max));
-    try {
-      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
-      fail("Shard splitting with overlapping ranges should not succeed");
-    } catch (HttpSolrClient.RemoteSolrException e) {
-      log.info("Expected exception:", e);
-    }
-    subRanges.clear();
-  }
-
-  private void splitByUniqueKeyTest() throws Exception {
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-    final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
-    Slice shard1 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(SHARD1);
-    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
-    List<DocRouter.Range> subRanges = new ArrayList<>();
-    if (usually())  {
-      List<DocRouter.Range> ranges = router.partitionRange(4, shard1Range);
-      // 75% of range goes to shard1_0 and the rest to shard1_1
-      subRanges.add(new DocRouter.Range(ranges.get(0).min, ranges.get(2).max));
-      subRanges.add(ranges.get(3));
-    } else  {
-      subRanges = router.partitionRange(2, shard1Range);
-    }
-    final List<DocRouter.Range> ranges = subRanges;
-    final int[] docCounts = new int[ranges.size()];
-    int numReplicas = shard1.getReplicas().size();
-
-    del("*:*");
-    for (int id = 0; id <= 100; id++) {
-      String shardKey = "" + (char)('a' + (id % 26)); // See comment in ShardRoutingTest for hash distribution
-      indexAndUpdateCount(router, ranges, docCounts, shardKey + "!" + String.valueOf(id), id);
-    }
-    commit();
-
-    Thread indexThread = new Thread() {
-      @Override
-      public void run() {
-        Random random = random();
-        int max = atLeast(random, 401);
-        int sleep = atLeast(random, 25);
-        log.info("SHARDSPLITTEST: Going to add " + max + " number of docs at 1 doc per " + sleep + "ms");
-        Set<String> deleted = new HashSet<>();
-        for (int id = 101; id < max; id++) {
-          try {
-            indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id), id);
-            Thread.sleep(sleep);
-            if (usually(random))  {
-              String delId = String.valueOf(random.nextInt(id - 101 + 1) + 101);
-              if (deleted.contains(delId))  continue;
-              try {
-                deleteAndUpdateCount(router, ranges, docCounts, delId);
-                deleted.add(delId);
-              } catch (Exception e) {
-                log.error("Exception while deleting docs", e);
-              }
-            }
-          } catch (Exception e) {
-            log.error("Exception while adding doc id = " + id, e);
-            // do not select this id for deletion ever
-            deleted.add(String.valueOf(id));
-          }
-        }
-      }
-    };
-    indexThread.start();
-
-    try {
-      for (int i = 0; i < 3; i++) {
-        try {
-          splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
-          log.info("Layout after split: \n");
-          printLayout();
-          break;
-        } catch (HttpSolrClient.RemoteSolrException e) {
-          if (e.code() != 500)  {
-            throw e;
-          }
-          log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
-          if (i == 2) {
-            fail("SPLITSHARD was not successful even after three tries");
-          }
-        }
-      }
-    } finally {
-      try {
-        indexThread.join();
-      } catch (InterruptedException e) {
-        log.error("Indexing thread interrupted", e);
-      }
-    }
-
-    waitForRecoveriesToFinish(true);
-    checkDocCountsAndShardStates(docCounts, numReplicas);
-  }
-
-
-  public void splitByRouteFieldTest() throws Exception  {
-    log.info("Starting testSplitWithRouteField");
-    String collectionName = "routeFieldColl";
-    int numShards = 4;
-    int replicationFactor = 2;
-    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
-        .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
-    HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
-    String shard_fld = "shard_s";
-    try (CloudSolrClient client = createCloudClient(null)) {
-      Map<String, Object> props = Utils.makeMap(
-          REPLICATION_FACTOR, replicationFactor,
-          MAX_SHARDS_PER_NODE, maxShardsPerNode,
-          NUM_SLICES, numShards,
-          "router.field", shard_fld);
-
-      createCollection(collectionInfos, collectionName,props,client);
-    }
-
-    List<Integer> list = collectionInfos.get(collectionName);
-    checkForCollection(collectionName, list, null);
-
-    waitForRecoveriesToFinish(false);
-
-    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
-
-    try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
-
-      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-      final DocRouter router = clusterState.getCollection(collectionName).getRouter();
-      Slice shard1 = clusterState.getCollection(collectionName).getSlice(SHARD1);
-      DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
-      final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
-      final int[] docCounts = new int[ranges.size()];
-
-      for (int i = 100; i <= 200; i++) {
-        String shardKey = "" + (char) ('a' + (i % 26)); // See comment in ShardRoutingTest for hash distribution
-
-        collectionClient.add(getDoc(id, i, "n_ti", i, shard_fld, shardKey));
-        int idx = getHashRangeIdx(router, ranges, shardKey);
-        if (idx != -1) {
-          docCounts[idx]++;
-        }
-      }
-
-      for (int i = 0; i < docCounts.length; i++) {
-        int docCount = docCounts[i];
-        log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
-      }
-
-      collectionClient.commit();
-
-      for (int i = 0; i < 3; i++) {
-        try {
-          splitShard(collectionName, SHARD1, null, null);
-          break;
-        } catch (HttpSolrClient.RemoteSolrException e) {
-          if (e.code() != 500) {
-            throw e;
-          }
-          log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
-          if (i == 2) {
-            fail("SPLITSHARD was not successful even after three tries");
-          }
-        }
-      }
-
-      waitForRecoveriesToFinish(collectionName, false);
-
-      assertEquals(docCounts[0], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_0")).getResults().getNumFound());
-      assertEquals(docCounts[1], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_1")).getResults().getNumFound());
-    }
-  }
-
-  private void splitByRouteKeyTest() throws Exception {
-    log.info("Starting splitByRouteKeyTest");
-    String collectionName = "splitByRouteKeyTest";
-    int numShards = 4;
-    int replicationFactor = 2;
-    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
-        .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
-    HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      Map<String, Object> props = Utils.makeMap(
-          REPLICATION_FACTOR, replicationFactor,
-          MAX_SHARDS_PER_NODE, maxShardsPerNode,
-          NUM_SLICES, numShards);
-
-      createCollection(collectionInfos, collectionName,props,client);
-    }
-
-    List<Integer> list = collectionInfos.get(collectionName);
-    checkForCollection(collectionName, list, null);
-
-    waitForRecoveriesToFinish(false);
-
-    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
-
-    try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
-
-      String splitKey = "b!";
-
-      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-      final DocRouter router = clusterState.getCollection(collectionName).getRouter();
-      Slice shard1 = clusterState.getCollection(collectionName).getSlice(SHARD1);
-      DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
-      final List<DocRouter.Range> ranges = ((CompositeIdRouter) router).partitionRangeByKey(splitKey, shard1Range);
-      final int[] docCounts = new int[ranges.size()];
-
-      int uniqIdentifier = (1 << 12);
-      int splitKeyDocCount = 0;
-      for (int i = 100; i <= 200; i++) {
-        String shardKey = "" + (char) ('a' + (i % 26)); // See comment in ShardRoutingTest for hash distribution
-
-        String idStr = shardKey + "!" + i;
-        collectionClient.add(getDoc(id, idStr, "n_ti", (shardKey + "!").equals(splitKey) ? uniqIdentifier : i));
-        int idx = getHashRangeIdx(router, ranges, idStr);
-        if (idx != -1) {
-          docCounts[idx]++;
-        }
-        if (splitKey.equals(shardKey + "!"))
-          splitKeyDocCount++;
-      }
-
-      for (int i = 0; i < docCounts.length; i++) {
-        int docCount = docCounts[i];
-        log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
-      }
-      log.info("Route key doc count = {}", splitKeyDocCount);
-
-      collectionClient.commit();
-
-      for (int i = 0; i < 3; i++) {
-        try {
-          splitShard(collectionName, null, null, splitKey);
-          break;
-        } catch (HttpSolrClient.RemoteSolrException e) {
-          if (e.code() != 500) {
-            throw e;
-          }
-          log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
-          if (i == 2) {
-            fail("SPLITSHARD was not successful even after three tries");
-          }
-        }
-      }
-
-      waitForRecoveriesToFinish(collectionName, false);
-      SolrQuery solrQuery = new SolrQuery("*:*");
-      assertEquals("DocCount on shard1_0 does not match", docCounts[0], collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
-      assertEquals("DocCount on shard1_1 does not match", docCounts[1], collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
-      assertEquals("DocCount on shard1_2 does not match", docCounts[2], collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
-
-      solrQuery = new SolrQuery("n_ti:" + uniqIdentifier);
-      assertEquals("shard1_0 must have 0 docs for route key: " + splitKey, 0, collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
-      assertEquals("Wrong number of docs on shard1_1 for route key: " + splitKey, splitKeyDocCount, collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
-      assertEquals("shard1_2 must have 0 docs for route key: " + splitKey, 0, collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
-    }
-  }
-
-  protected void checkDocCountsAndShardStates(int[] docCounts, int numReplicas) throws Exception {
-    ClusterState clusterState = null;
-    Slice slice1_0 = null, slice1_1 = null;
-    int i = 0;
-    for (i = 0; i < 10; i++) {
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-      clusterState = zkStateReader.getClusterState();
-      slice1_0 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice("shard1_0");
-      slice1_1 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice("shard1_1");
-      if (slice1_0.getState() == Slice.State.ACTIVE && slice1_1.getState() == Slice.State.ACTIVE) {
-        break;
-      }
-      Thread.sleep(500);
-    }
-
-    log.info("ShardSplitTest waited for {} ms for shard state to be set to active", i * 500);
-
-    assertNotNull("Cluster state does not contain shard1_0", slice1_0);
-    assertNotNull("Cluster state does not contain shard1_0", slice1_1);
-    assertSame("shard1_0 is not active", Slice.State.ACTIVE, slice1_0.getState());
-    assertSame("shard1_1 is not active", Slice.State.ACTIVE, slice1_1.getState());
-    assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size());
-    assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size());
-
-    commit();
-
-    // can't use checkShardConsistency because it insists on jettys and clients for each shard
-    checkSubShardConsistency(SHARD1_0);
-    checkSubShardConsistency(SHARD1_1);
-
-    SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
-    query.set("distrib", false);
-
-    ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
-    QueryResponse response;
-    try (HttpSolrClient shard1_0Client = getHttpSolrClient(shard1_0.getCoreUrl())) {
-      response = shard1_0Client.query(query);
-    }
-    long shard10Count = response.getResults().getNumFound();
-
-    ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(
-        AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
-    QueryResponse response2;
-    try (HttpSolrClient shard1_1Client = getHttpSolrClient(shard1_1.getCoreUrl())) {
-      response2 = shard1_1Client.query(query);
-    }
-    long shard11Count = response2.getResults().getNumFound();
-
-    logDebugHelp(docCounts, response, shard10Count, response2, shard11Count);
-
-    assertEquals("Wrong doc count on shard1_0. See SOLR-5309", docCounts[0], shard10Count);
-    assertEquals("Wrong doc count on shard1_1. See SOLR-5309", docCounts[1], shard11Count);
-  }
-
-  protected void checkSubShardConsistency(String shard) throws SolrServerException, IOException {
-    SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
-    query.set("distrib", false);
-
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-    Slice slice = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(shard);
-    long[] numFound = new long[slice.getReplicasMap().size()];
-    int c = 0;
-    for (Replica replica : slice.getReplicas()) {
-      String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
-      QueryResponse response;
-      try (HttpSolrClient client = getHttpSolrClient(coreUrl)) {
-        response = client.query(query);
-      }
-      numFound[c++] = response.getResults().getNumFound();
-      log.info("Shard: " + shard + " Replica: {} has {} docs", coreUrl, String.valueOf(response.getResults().getNumFound()));
-      assertTrue("Shard: " + shard + " Replica: " + coreUrl + " has 0 docs", response.getResults().getNumFound() > 0);
-    }
-    for (int i = 0; i < slice.getReplicasMap().size(); i++) {
-      assertEquals(shard + " is not consistent", numFound[0], numFound[i]);
-    }
-  }
-
-  protected void splitShard(String collection, String shardId, List<DocRouter.Range> subRanges, String splitKey) throws SolrServerException, IOException {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionParams.CollectionAction.SPLITSHARD.toString());
-    params.set("collection", collection);
-    if (shardId != null)  {
-      params.set("shard", shardId);
-    }
-    if (subRanges != null)  {
-      StringBuilder ranges = new StringBuilder();
-      for (int i = 0; i < subRanges.size(); i++) {
-        DocRouter.Range subRange = subRanges.get(i);
-        ranges.append(subRange.toString());
-        if (i < subRanges.size() - 1)
-          ranges.append(",");
-      }
-      params.set("ranges", ranges.toString());
-    }
-    if (splitKey != null) {
-      params.set("split.key", splitKey);
-    }
-    SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
-        .getBaseURL();
-    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
-
-    try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 30000, 60000 * 5)) {
-      baseServer.request(request);
-    }
-  }
-
-  protected void indexAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id, int n) throws Exception {
-    index("id", id, "n_ti", n);
-
-    int idx = getHashRangeIdx(router, ranges, id);
-    if (idx != -1)  {
-      docCounts[idx]++;
-    }
-  }
-
-  protected void deleteAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id) throws Exception {
-    controlClient.deleteById(id);
-    cloudClient.deleteById(id);
-
-    int idx = getHashRangeIdx(router, ranges, id);
-    if (idx != -1)  {
-      docCounts[idx]--;
-    }
-  }
-
-  public static int getHashRangeIdx(DocRouter router, List<DocRouter.Range> ranges, String id) {
-    int hash = 0;
-    if (router instanceof HashBasedRouter) {
-      HashBasedRouter hashBasedRouter = (HashBasedRouter) router;
-      hash = hashBasedRouter.sliceHash(id, null, null, null);
-    }
-    for (int i = 0; i < ranges.size(); i++) {
-      DocRouter.Range range = ranges.get(i);
-      if (range.includes(hash))
-        return i;
-    }
-    return -1;
-  }
-
-  protected void logDebugHelp(int[] docCounts, QueryResponse response, long shard10Count, QueryResponse response2, long shard11Count) {
-    for (int i = 0; i < docCounts.length; i++) {
-      int docCount = docCounts[i];
-      log.info("Expected docCount for shard1_{} = {}", i, docCount);
-    }
-
-    log.info("Actual docCount for shard1_0 = {}", shard10Count);
-    log.info("Actual docCount for shard1_1 = {}", shard11Count);
-    Map<String, String> idVsVersion = new HashMap<>();
-    Map<String, SolrDocument> shard10Docs = new HashMap<>();
-    Map<String, SolrDocument> shard11Docs = new HashMap<>();
-    for (int i = 0; i < response.getResults().size(); i++) {
-      SolrDocument document = response.getResults().get(i);
-      idVsVersion.put(document.getFieldValue("id").toString(), document.getFieldValue("_version_").toString());
-      SolrDocument old = shard10Docs.put(document.getFieldValue("id").toString(), document);
-      if (old != null) {
-        log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_0. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
-      }
-    }
-    for (int i = 0; i < response2.getResults().size(); i++) {
-      SolrDocument document = response2.getResults().get(i);
-      String value = document.getFieldValue("id").toString();
-      String version = idVsVersion.get(value);
-      if (version != null) {
-        log.error("DUPLICATE: ID: " + value + " , shard1_0Version: " + version + " shard1_1Version:" + document.getFieldValue("_version_"));
-      }
-      SolrDocument old = shard11Docs.put(document.getFieldValue("id").toString(), document);
-      if (old != null) {
-        log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_1. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
-      }
-    }
-  }
-
-  @Override
-  protected SolrClient createNewSolrClient(String collection, String baseUrl) {
-    HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(collection, baseUrl, DEFAULT_CONNECTION_TIMEOUT, 5 * 60 * 1000);
-    return client;
-  }
-
-  @Override
-  protected SolrClient createNewSolrClient(int port) {
-    HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(port, DEFAULT_CONNECTION_TIMEOUT, 5 * 60 * 1000);
-    return client;
-  }
-
-  @Override
-  protected CloudSolrClient createCloudClient(String defaultCollection) {
-    CloudSolrClient client = super.createCloudClient(defaultCollection);
-    return client;
-  }
-}
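
The splitShard helper above drives SPLITSHARD through raw ModifiableSolrParams against /admin/collections. For comparison, a minimal sketch of the same call through the typed SolrJ builder (assuming the CollectionAdminRequest.splitShard factory as it exists in SolrJ 7.x; the collection and shard names are illustrative):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    class SplitShardSketch {
      // Issues SPLITSHARD for shard1 of collection1 via the typed builder,
      // equivalent to the raw-params request built in the helper above.
      static void splitViaBuilder(SolrClient client) throws Exception {
        CollectionAdminRequest.SplitShard split =
            CollectionAdminRequest.splitShard("collection1").setShardName("shard1");
        split.process(client);
      }
    }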

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java b/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java
deleted file mode 100644
index 3e18ce0..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.junit.Test;
-
-public class SimpleCollectionCreateDeleteTest extends AbstractFullDistribZkTestBase {
-
-  public SimpleCollectionCreateDeleteTest() {
-    sliceCount = 1;
-  }
-
-  @Test
-  @ShardsFixed(num = 1)
-  public void test() throws Exception {
-    String overseerNode = OverseerCollectionConfigSetProcessor.getLeaderNode(cloudClient.getZkStateReader().getZkClient());
-    String notOverseerNode = null;
-    for (CloudJettyRunner cloudJetty : cloudJettys) {
-      if (!overseerNode.equals(cloudJetty.nodeName)) {
-        notOverseerNode = cloudJetty.nodeName;
-        break;
-      }
-    }
-    String collectionName = "SimpleCollectionCreateDeleteTest";
-    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, 1, 1)
-            .setCreateNodeSet(overseerNode)
-            .setStateFormat(2);
-
-    NamedList<Object> request = create.process(cloudClient).getResponse();
-
-    if (request.get("success") != null) {
-      assertTrue(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, false));
-
-      CollectionAdminRequest delete = CollectionAdminRequest.deleteCollection(collectionName);
-      cloudClient.request(delete);
-
-      assertFalse(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, false));
-
-      // create collection again on a node other than the overseer leader
-      create = CollectionAdminRequest.createCollection(collectionName, 1, 1)
-              .setCreateNodeSet(notOverseerNode)
-              .setStateFormat(2);
-      request = create.process(cloudClient).getResponse();
-      assertTrue("Collection creation should not have failed", request.get("success") != null);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
deleted file mode 100644
index cf4111e..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.google.common.collect.Lists;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.request.V2Request;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-import org.junit.Test;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARD_UNIQUE;
-
-public class TestCollectionAPI extends ReplicaPropertiesBase {
-
-  public static final String COLLECTION_NAME = "testcollection";
-  public static final String COLLECTION_NAME1 = "testcollection1";
-
-  public TestCollectionAPI() {
-    schemaString = "schema15.xml";      // we need a string id
-    sliceCount = 2;
-  }
-
-  @Test
-  @ShardsFixed(num = 2)
-  public void test() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      CollectionAdminRequest.Create req;
-      if (useTlogReplicas()) {
-        req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1",2, 0, 1, 1);
-      } else {
-        req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1",2, 1, 0, 1);
-      }
-      req.setMaxShardsPerNode(2);
-      setV2(req);
-      client.request(req);
-      assertV2CallsCount();
-      createCollection(null, COLLECTION_NAME1, 1, 1, 1, client, null, "conf1");
-    }
-
-    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
-    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME1, 1);
-    waitForRecoveriesToFinish(COLLECTION_NAME, false);
-    waitForRecoveriesToFinish(COLLECTION_NAME1, false);
-
-    listCollection();
-    clusterStatusNoCollection();
-    clusterStatusWithCollection();
-    clusterStatusWithCollectionAndShard();
-    clusterStatusWithRouteKey();
-    clusterStatusAliasTest();
-    clusterStatusRolesTest();
-    clusterStatusBadCollectionTest();
-    replicaPropTest();
-    clusterStatusZNodeVersion();
-    testClusterStateMigration();
-    testCollectionCreationCollectionNameValidation();
-    testCollectionCreationShardNameValidation();
-    testAliasCreationNameValidation();
-    testShardCreationNameValidation();
-  }
-
-  private void clusterStatusWithCollectionAndShard() throws IOException, SolrServerException {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", COLLECTION_NAME);
-      params.set("shard", SHARD1);
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
-      assertNotNull("Collections should not be null in cluster state", collections);
-      assertNotNull(collections.get(COLLECTION_NAME));
-      assertEquals(1, collections.size());
-      Map<String, Object> collection = (Map<String, Object>) collections.get(COLLECTION_NAME);
-      Map<String, Object> shardStatus = (Map<String, Object>) collection.get("shards");
-      assertEquals(1, shardStatus.size());
-      Map<String, Object> selectedShardStatus = (Map<String, Object>) shardStatus.get(SHARD1);
-      assertNotNull(selectedShardStatus);
-
-    }
-  }
-
-
-  private void listCollection() throws IOException, SolrServerException {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.LIST.toString());
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      List<String> collections = (List<String>) rsp.get("collections");
-      assertTrue("control_collection was not found in list", collections.contains("control_collection"));
-      assertTrue(DEFAULT_COLLECTION + " was not found in list", collections.contains(DEFAULT_COLLECTION));
-      assertTrue(COLLECTION_NAME + " was not found in list", collections.contains(COLLECTION_NAME));
-      assertTrue(COLLECTION_NAME1 + " was not found in list", collections.contains(COLLECTION_NAME1));
-    }
-
-  }
-
-  private void clusterStatusNoCollection() throws Exception {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
-      assertNotNull("Collections should not be null in cluster state", collections);
-      assertNotNull(collections.get(COLLECTION_NAME1));
-      assertEquals(4, collections.size());
-
-      List<String> liveNodes = (List<String>) cluster.get("live_nodes");
-      assertNotNull("Live nodes should not be null", liveNodes);
-      assertFalse(liveNodes.isEmpty());
-    }
-
-  }
-
-  private void clusterStatusWithCollection() throws IOException, SolrServerException {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", COLLECTION_NAME);
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
-      assertNotNull("Collections should not be null in cluster state", collections);
-      assertEquals(1, collections.size());
-      Map<String, Object> collection = (Map<String, Object>) collections.get(COLLECTION_NAME);
-      assertNotNull(collection);
-      assertEquals("conf1", collection.get("configName"));
-//      assertEquals("1", collection.get("nrtReplicas"));
-    }
-  }
-
-  private void clusterStatusZNodeVersion() throws Exception {
-    String cname = "clusterStatusZNodeVersion";
-    try (CloudSolrClient client = createCloudClient(null)) {
-      setV2(CollectionAdminRequest.createCollection(cname, "conf1", 1, 1).setMaxShardsPerNode(1)).process(client);
-      assertV2CallsCount();
-      waitForRecoveriesToFinish(cname, true);
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", cname);
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
-      assertNotNull("Collections should not be null in cluster state", collections);
-      assertEquals(1, collections.size());
-      Map<String, Object> collection = (Map<String, Object>) collections.get(cname);
-      assertNotNull(collection);
-      assertEquals("conf1", collection.get("configName"));
-      Integer znodeVersion = (Integer) collection.get("znodeVersion");
-      assertNotNull(znodeVersion);
-
-      CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(cname, "shard1");
-      setV2(addReplica);
-      addReplica.process(client);
-      assertV2CallsCount();
-      waitForRecoveriesToFinish(cname, true);
-
-      rsp = client.request(request);
-      cluster = (NamedList<Object>) rsp.get("cluster");
-      collections = (NamedList<Object>) cluster.get("collections");
-      collection = (Map<String, Object>) collections.get(cname);
-      Integer newVersion = (Integer) collection.get("znodeVersion");
-      assertNotNull(newVersion);
-      assertTrue(newVersion > znodeVersion);
-    }
-  }
-
-  private static long totalExpectedV2Calls;
-
-  public static SolrRequest setV2(SolrRequest req) {
-    if (V2Request.v2Calls.get() == null) V2Request.v2Calls.set(new AtomicLong());
-    totalExpectedV2Calls = V2Request.v2Calls.get().get();
-    if (random().nextBoolean()) {
-      req.setUseV2(true);
-      req.setUseBinaryV2(random().nextBoolean());
-      totalExpectedV2Calls++;
-    }
-    return req;
-  }
-
-  public static void assertV2CallsCount() {
-    assertEquals(totalExpectedV2Calls, V2Request.v2Calls.get().get());
-  }
-
-  private void clusterStatusWithRouteKey() throws IOException, SolrServerException {
-    try (CloudSolrClient client = createCloudClient(DEFAULT_COLLECTION)) {
-      SolrInputDocument doc = new SolrInputDocument();
-      doc.addField("id", "a!123"); // goes to shard2. see ShardRoutingTest for details
-      client.add(doc);
-      client.commit();
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", DEFAULT_COLLECTION);
-      params.set(ShardParams._ROUTE_, "a!");
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
-      assertNotNull("Collections should not be null in cluster state", collections);
-      assertNotNull(collections.get(DEFAULT_COLLECTION));
-      assertEquals(1, collections.size());
-      Map<String, Object> collection = (Map<String, Object>) collections.get(DEFAULT_COLLECTION);
-      assertEquals("conf1", collection.get("configName"));
-      Map<String, Object> shardStatus = (Map<String, Object>) collection.get("shards");
-      assertEquals(1, shardStatus.size());
-      Map<String, Object> selectedShardStatus = (Map<String, Object>) shardStatus.get(SHARD2);
-      assertNotNull(selectedShardStatus);
-    }
-  }
-
-  private void clusterStatusAliasTest() throws Exception  {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CREATEALIAS.toString());
-      params.set("name", "myalias");
-      params.set("collections", DEFAULT_COLLECTION + "," + COLLECTION_NAME);
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-      client.request(request);
-      params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", DEFAULT_COLLECTION);
-      request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-
-
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      Map<String, String> aliases = (Map<String, String>) cluster.get("aliases");
-      assertNotNull("Aliases should not be null", aliases);
-      assertEquals("Alias: myalias not found in cluster status",
-          DEFAULT_COLLECTION + "," + COLLECTION_NAME, aliases.get("myalias"));
-
-      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
-      assertNotNull("Collections should not be null in cluster state", collections);
-      assertNotNull(collections.get(DEFAULT_COLLECTION));
-      Map<String, Object> collection = (Map<String, Object>) collections.get(DEFAULT_COLLECTION);
-      assertEquals("conf1", collection.get("configName"));
-      List<String> collAlias = (List<String>) collection.get("aliases");
-      assertEquals("Aliases not found", Lists.newArrayList("myalias"), collAlias);
-    }
-  }
-
-  private void clusterStatusRolesTest() throws Exception  {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      client.connect();
-      Replica replica = client.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.ADDROLE.toString());
-      params.set("node", replica.getNodeName());
-      params.set("role", "overseer");
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-      client.request(request);
-
-      params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", DEFAULT_COLLECTION);
-      request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      Map<String, Object> roles = (Map<String, Object>) cluster.get("roles");
-      assertNotNull("Role information should not be null", roles);
-      List<String> overseer = (List<String>) roles.get("overseer");
-      assertNotNull(overseer);
-      assertEquals(1, overseer.size());
-      assertTrue(overseer.contains(replica.getNodeName()));
-    }
-  }
-
-  private void clusterStatusBadCollectionTest() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", "bad_collection_name");
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      try {
-        client.request(request);
-        fail("Collection does not exist. An exception should be thrown");
-      } catch (SolrException e) {
-        //expected
-        assertTrue(e.getMessage().contains("Collection: bad_collection_name not found"));
-      }
-    }
-  }
-
-  private void replicaPropTest() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      client.connect();
-      Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
-      List<String> sliceList = new ArrayList<>(slices.keySet());
-      String c1_s1 = sliceList.get(0);
-      List<String> replicasList = new ArrayList<>(slices.get(c1_s1).getReplicasMap().keySet());
-      String c1_s1_r1 = replicasList.get(0);
-      String c1_s1_r2 = replicasList.get(1);
-
-      String c1_s2 = sliceList.get(1);
-      replicasList = new ArrayList<>(slices.get(c1_s2).getReplicasMap().keySet());
-      String c1_s2_r1 = replicasList.get(0);
-      String c1_s2_r2 = replicasList.get(1);
-
-
-      slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME1).getSlicesMap();
-      sliceList = new ArrayList<>(slices.keySet());
-      String c2_s1 = sliceList.get(0);
-      replicasList = new ArrayList<>(slices.get(c2_s1).getReplicasMap().keySet());
-      String c2_s1_r1 = replicasList.get(0);
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString());
-
-      // Ensure we get error returns when omitting required parameters
-
-      missingParamsError(client, params);
-      params.set("collection", COLLECTION_NAME);
-      missingParamsError(client, params);
-      params.set("shard", c1_s1);
-      missingParamsError(client, params);
-      params.set("replica", c1_s1_r1);
-      missingParamsError(client, params);
-      params.set("property", "preferredLeader");
-      missingParamsError(client, params);
-      params.set("property.value", "true");
-
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-      client.request(request);
-
-      // The above should have set exactly one preferredleader...
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "preferredleader", "true");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r2,
-          "property", "preferredLeader",
-          "property.value", "true");
-      // The preferred leader property for shard1 should have switched to the other replica.
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s2,
-          "replica", c1_s2_r1,
-          "property", "preferredLeader",
-          "property.value", "true");
-
-      // Now we should have a preferred leader in both shards...
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME1,
-          "shard", c2_s1,
-          "replica", c2_s1_r1,
-          "property", "preferredLeader",
-          "property.value", "true");
-
-      // Now we should have three preferred leaders.
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME1, c2_s1_r1, "preferredleader", "true");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME1,
-          "shard", c2_s1,
-          "replica", c2_s1_r1,
-          "property", "preferredLeader");
-
-      // Now we should have two preferred leaders.
-      // But first we have to wait for the overseer to finish the action
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      // Try adding an arbitrary property to one that has the leader property
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "testprop",
-          "property.value", "true");
-
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "true");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r2,
-          "property", "prop",
-          "property.value", "silly");
-
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "testprop",
-          "property.value", "nonsense",
-          SHARD_UNIQUE, "true");
-
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "nonsense");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "property.testprop",
-          "property.value", "true",
-          SHARD_UNIQUE, "false");
-
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "property.testprop");
-
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "testprop");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      try {
-        doPropertyAction(client,
-            "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-            "collection", COLLECTION_NAME,
-            "shard", c1_s1,
-            "replica", c1_s1_r1,
-            "property", "preferredLeader",
-            "property.value", "true",
-            SHARD_UNIQUE, "false");
-        fail("Should have thrown an exception, setting shardUnique=false is not allowed for 'preferredLeader'.");
-      } catch (SolrException se) {
-        assertTrue("Should have received a specific error message",
-            se.getMessage().contains("with the shardUnique parameter set to something other than 'true'"));
-      }
-
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "testprop");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      Map<String, String> origProps = getProps(client, COLLECTION_NAME, c1_s1_r1,
-          "state", "core", "node_name", "base_url");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "state",
-          "property.value", "state_bad");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "core",
-          "property.value", "core_bad");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "node_name",
-          "property.value", "node_name_bad");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "base_url",
-          "property.value", "base_url_bad");
-
-      // The above should have created new properties rather than overwriting the built-in values.
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "state", "state_bad");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "core", "core_bad");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "node_name", "node_name_bad");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "base_url", "base_url_bad");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "state");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "core");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "node_name");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "base_url");
-
-      // They better not have been changed!
-      for (Map.Entry<String, String> ent : origProps.entrySet()) {
-        verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, ent.getKey(), ent.getValue());
-      }
-
-      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "state");
-      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "core");
-      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "node_name");
-      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "base_url");
-
-    }
-  }
-
-  private void testClusterStateMigration() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      client.connect();
-
-      CollectionAdminRequest.createCollection("testClusterStateMigration","conf1",1,1).setStateFormat(1).process(client);
-
-      waitForRecoveriesToFinish("testClusterStateMigration", true);
-
-      assertEquals(1, client.getZkStateReader().getClusterState().getCollection("testClusterStateMigration").getStateFormat());
-
-      for (int i = 0; i < 10; i++) {
-        SolrInputDocument doc = new SolrInputDocument();
-        doc.addField("id", "id_" + i);
-        client.add("testClusterStateMigration", doc);
-      }
-      client.commit("testClusterStateMigration");
-
-      CollectionAdminRequest.migrateCollectionFormat("testClusterStateMigration").process(client);
-
-      client.getZkStateReader().forceUpdateCollection("testClusterStateMigration");
-
-      assertEquals(2, client.getZkStateReader().getClusterState().getCollection("testClusterStateMigration").getStateFormat());
-
-      QueryResponse response = client.query("testClusterStateMigration", new SolrQuery("*:*"));
-      assertEquals(10, response.getResults().getNumFound());
-    }
-  }
-  
-  private void testCollectionCreationCollectionNameValidation() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CREATE.toString());
-      params.set("name", "invalid@name#with$weird%characters");
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      try {
-        client.request(request);
-        fail();
-      } catch (RemoteSolrException e) {
-        final String errorMessage = e.getMessage();
-        assertTrue(errorMessage.contains("Invalid collection"));
-        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
-        assertTrue(errorMessage.contains("collection names must consist entirely of"));
-      }
-    }
-  }
-  
-  private void testCollectionCreationShardNameValidation() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CREATE.toString());
-      params.set("name", "valid_collection_name");
-      params.set("router.name", "implicit");
-      params.set("numShards", "1");
-      params.set("shards", "invalid@name#with$weird%characters");
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      try {
-        client.request(request);
-        fail();
-      } catch (RemoteSolrException e) {
-        final String errorMessage = e.getMessage();
-        assertTrue(errorMessage.contains("Invalid shard"));
-        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
-        assertTrue(errorMessage.contains("shard names must consist entirely of"));
-      }
-    }
-  }
-  
-  private void testAliasCreationNameValidation() throws Exception{
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CREATEALIAS.toString());
-      params.set("name", "invalid@name#with$weird%characters");
-      params.set("collections", COLLECTION_NAME);
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      try {
-        client.request(request);
-        fail();
-      } catch (RemoteSolrException e) {
-        final String errorMessage = e.getMessage();
-        assertTrue(errorMessage.contains("Invalid alias"));
-        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
-        assertTrue(errorMessage.contains("alias names must consist entirely of"));
-      }
-    }
-  }
-
-  private void testShardCreationNameValidation() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      client.connect();
-      // Create a collection w/ implicit router
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CREATE.toString());
-      params.set("name", "valid_collection_name");
-      params.set("shards", "a");
-      params.set("router.name", "implicit");
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-      client.request(request);
-
-      params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CREATESHARD.toString());
-      params.set("collection", "valid_collection_name");
-      params.set("shard", "invalid@name#with$weird%characters");
-
-      request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      try {
-        client.request(request);
-        fail();
-      } catch (RemoteSolrException e) {
-        final String errorMessage = e.getMessage();
-        assertTrue(errorMessage.contains("Invalid shard"));
-        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
-        assertTrue(errorMessage.contains("shard names must consist entirely of"));
-      }
-    }
-  }
-
-  // Expects the map will have keys, but blank values.
-  private Map<String, String> getProps(CloudSolrClient client, String collectionName, String replicaName, String... props)
-      throws KeeperException, InterruptedException {
-
-    client.getZkStateReader().forceUpdateCollection(collectionName);
-    ClusterState clusterState = client.getZkStateReader().getClusterState();
-    final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
-    if (docCollection == null || docCollection.getReplica(replicaName) == null) {
-      fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
-    }
-    Replica replica = docCollection.getReplica(replicaName);
-    Map<String, String> propMap = new HashMap<>();
-    for (String prop : props) {
-      propMap.put(prop, replica.getProperty(prop));
-    }
-    return propMap;
-  }
-  private void missingParamsError(CloudSolrClient client, ModifiableSolrParams origParams)
-      throws IOException, SolrServerException {
-
-    SolrRequest request;
-    try {
-      request = new QueryRequest(origParams);
-      request.setPath("/admin/collections");
-      client.request(request);
-      fail("Should have thrown a SolrException due to lack of a required parameter.");
-    } catch (SolrException se) {
-      assertTrue("Should have gotten a specific message back mentioning 'missing required parameter'. Got: " + se.getMessage(),
-          se.getMessage().toLowerCase(Locale.ROOT).contains("missing required parameter:"));
-    }
-  }
-}
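
The replicaPropTest above drives ADDREPLICAPROP and DELETEREPLICAPROP through raw params. A minimal sketch of the same ADDREPLICAPROP call through the typed builder, assuming the CollectionAdminRequest.addReplicaProperty factory from SolrJ 7.x (the shard and core_node names are illustrative):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    class ReplicaPropSketch {
      // Marks one replica as preferredLeader, equivalent to the raw-params call above.
      static void setPreferredLeader(SolrClient client) throws Exception {
        CollectionAdminRequest.AddReplicaProp add =
            CollectionAdminRequest.addReplicaProperty(
                "testcollection", "shard1", "core_node1", "preferredLeader", "true");
        add.process(client);
      }
    }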


[05/41] lucene-solr:jira/solr-11702: LUCENE-8122: Update autogenerated code after update to ICU4J 60.2

Posted by da...@apache.org.
LUCENE-8122: Update autogenerated code after update to ICU4J 60.2


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d99bfa4b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d99bfa4b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d99bfa4b

Branch: refs/heads/jira/solr-11702
Commit: d99bfa4bdb3442581bd9559b289887a8bc44c957
Parents: e4438a2
Author: Uwe Schindler <us...@apache.org>
Authored: Mon Jan 15 16:21:56 2018 +0100
Committer: Uwe Schindler <us...@apache.org>
Committed: Mon Jan 15 16:21:56 2018 +0100

----------------------------------------------------------------------
 .../lucene/analysis/util/UnicodeProps.java      | 116 +++++++++----------
 1 file changed, 58 insertions(+), 58 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d99bfa4b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/UnicodeProps.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/UnicodeProps.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/UnicodeProps.java
index 00ee311..254977f 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/UnicodeProps.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/UnicodeProps.java
@@ -1,58 +1,58 @@
-// DO NOT EDIT THIS FILE! Use "ant unicode-data" to recreate.
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.lucene.analysis.util;
-
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.SparseFixedBitSet;
-
-/**
- * This file contains unicode properties used by various {@link CharTokenizer}s.
- * The data was created using ICU4J v59.1.0.0
- * <p>
- * Unicode version: 9.0.0.0
- */
-public final class UnicodeProps {
-  private UnicodeProps() {}
-  
-  /** Unicode version that was used to generate this file: {@value} */
-  public static final String UNICODE_VERSION = "9.0.0.0";
-  
-  /** Bitset with Unicode WHITESPACE code points. */
-  public static final Bits WHITESPACE = createBits(
-    0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x0020, 0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002, 0x2003, 
-    0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, 0x2028, 0x2029, 0x202F, 0x205F, 0x3000);
-  
-  private static Bits createBits(final int... codepoints) {
-    final int len = codepoints[codepoints.length - 1] + 1;
-    final SparseFixedBitSet bitset = new SparseFixedBitSet(len);
-    for (int i : codepoints) bitset.set(i);
-    return new Bits() {
-      @Override
-      public boolean get(int index) {
-        return index < len && bitset.get(index);
-      }
-      
-      @Override
-      public int length() {
-        return 0x10FFFF + 1;
-      }
-    };
-  }
-}
+// DO NOT EDIT THIS FILE! Use "ant unicode-data" to recreate.
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.analysis.util;
+
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.SparseFixedBitSet;
+
+/**
+ * This file contains unicode properties used by various {@link CharTokenizer}s.
+ * The data was created using ICU4J v60.2.0.0
+ * <p>
+ * Unicode version: 10.0.0.0
+ */
+public final class UnicodeProps {
+  private UnicodeProps() {}
+  
+  /** Unicode version that was used to generate this file: {@value} */
+  public static final String UNICODE_VERSION = "10.0.0.0";
+  
+  /** Bitset with Unicode WHITESPACE code points. */
+  public static final Bits WHITESPACE = createBits(
+    0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x0020, 0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002, 0x2003, 
+    0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, 0x2028, 0x2029, 0x202F, 0x205F, 0x3000);
+  
+  private static Bits createBits(final int... codepoints) {
+    final int len = codepoints[codepoints.length - 1] + 1;
+    final SparseFixedBitSet bitset = new SparseFixedBitSet(len);
+    for (int i : codepoints) bitset.set(i);
+    return new Bits() {
+      @Override
+      public boolean get(int index) {
+        return index < len && bitset.get(index);
+      }
+      
+      @Override
+      public int length() {
+        return 0x10FFFF + 1;
+      }
+    };
+  }
+}
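
The regenerated constants are consumed through the Bits interface shown above. A minimal usage sketch, relying only on the API visible in this diff (the class name WhitespaceProbe is illustrative):

    import org.apache.lucene.analysis.util.UnicodeProps;
    import org.apache.lucene.util.Bits;

    public class WhitespaceProbe {
      public static void main(String[] args) {
        Bits ws = UnicodeProps.WHITESPACE;
        System.out.println(ws.get(0x3000)); // true: IDEOGRAPHIC SPACE is in the generated list
        System.out.println(ws.get('A'));    // false: letters are not whitespace
        System.out.println(UnicodeProps.UNICODE_VERSION); // "10.0.0.0" after this change
      }
    }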


[24/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to its own package

Posted by da...@apache.org.
SOLR-11817: Move Collections API classes to its own package


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a3c4f738
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a3c4f738
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a3c4f738

Branch: refs/heads/jira/solr-11702
Commit: a3c4f7388c13cfdeb66d83b434b991e5e159d4cc
Parents: e2bba98
Author: Varun Thacker <va...@apache.org>
Authored: Mon Jan 15 18:07:34 2018 -0800
Committer: Varun Thacker <va...@apache.org>
Committed: Tue Jan 16 11:03:40 2018 -0800

----------------------------------------------------------------------
 solr/CHANGES.txt                                |    2 +
 .../org/apache/solr/cloud/AddReplicaCmd.java    |  279 -----
 .../src/java/org/apache/solr/cloud/Assign.java  |  483 ---------
 .../java/org/apache/solr/cloud/BackupCmd.java   |  225 ----
 .../solr/cloud/CloudConfigSetService.java       |    1 +
 .../java/org/apache/solr/cloud/CloudUtil.java   |    2 +-
 .../org/apache/solr/cloud/CreateAliasCmd.java   |  101 --
 .../apache/solr/cloud/CreateCollectionCmd.java  |  533 ---------
 .../org/apache/solr/cloud/CreateShardCmd.java   |  191 ----
 .../apache/solr/cloud/CreateSnapshotCmd.java    |  179 ---
 .../org/apache/solr/cloud/DeleteAliasCmd.java   |   43 -
 .../apache/solr/cloud/DeleteCollectionCmd.java  |  141 ---
 .../org/apache/solr/cloud/DeleteNodeCmd.java    |  137 ---
 .../org/apache/solr/cloud/DeleteReplicaCmd.java |  281 -----
 .../org/apache/solr/cloud/DeleteShardCmd.java   |  178 ---
 .../apache/solr/cloud/DeleteSnapshotCmd.java    |  160 ---
 .../solr/cloud/ExclusiveSliceProperty.java      |    5 +-
 .../solr/cloud/LeaderRecoveryWatcher.java       |   88 --
 .../java/org/apache/solr/cloud/MigrateCmd.java  |  337 ------
 .../org/apache/solr/cloud/MoveReplicaCmd.java   |  302 ------
 .../java/org/apache/solr/cloud/Overseer.java    |    1 +
 .../OverseerCollectionConfigSetProcessor.java   |    1 +
 .../cloud/OverseerCollectionMessageHandler.java | 1003 -----------------
 .../org/apache/solr/cloud/OverseerRoleCmd.java  |  102 --
 .../apache/solr/cloud/OverseerStatusCmd.java    |  112 --
 .../org/apache/solr/cloud/ReplaceNodeCmd.java   |  226 ----
 .../java/org/apache/solr/cloud/RestoreCmd.java  |  363 -------
 .../cloud/RoutedAliasCreateCollectionCmd.java   |  182 ----
 .../org/apache/solr/cloud/SplitShardCmd.java    |  542 ----------
 .../org/apache/solr/cloud/UtilizeNodeCmd.java   |  120 ---
 .../cloud/api/collections/AddReplicaCmd.java    |  282 +++++
 .../solr/cloud/api/collections/Assign.java      |  483 +++++++++
 .../solr/cloud/api/collections/BackupCmd.java   |  224 ++++
 .../cloud/api/collections/CreateAliasCmd.java   |  100 ++
 .../api/collections/CreateCollectionCmd.java    |  531 +++++++++
 .../cloud/api/collections/CreateShardCmd.java   |  190 ++++
 .../api/collections/CreateSnapshotCmd.java      |  179 +++
 .../cloud/api/collections/DeleteAliasCmd.java   |   43 +
 .../api/collections/DeleteCollectionCmd.java    |  142 +++
 .../cloud/api/collections/DeleteNodeCmd.java    |  137 +++
 .../cloud/api/collections/DeleteReplicaCmd.java |  280 +++++
 .../cloud/api/collections/DeleteShardCmd.java   |  178 +++
 .../api/collections/DeleteSnapshotCmd.java      |  160 +++
 .../api/collections/LeaderRecoveryWatcher.java  |   88 ++
 .../solr/cloud/api/collections/MigrateCmd.java  |  334 ++++++
 .../cloud/api/collections/MoveReplicaCmd.java   |  303 ++++++
 .../OverseerCollectionMessageHandler.java       | 1011 +++++++++++++++++
 .../cloud/api/collections/OverseerRoleCmd.java  |  102 ++
 .../api/collections/OverseerStatusCmd.java      |  113 ++
 .../cloud/api/collections/ReplaceNodeCmd.java   |  227 ++++
 .../solr/cloud/api/collections/RestoreCmd.java  |  357 ++++++
 .../RoutedAliasCreateCollectionCmd.java         |  184 ++++
 .../cloud/api/collections/SplitShardCmd.java    |  540 ++++++++++
 .../cloud/api/collections/UtilizeNodeCmd.java   |  120 +++
 .../cloud/api/collections/package-info.java     |   23 +
 .../cloud/overseer/ClusterStateMutator.java     |    2 +-
 .../solr/cloud/overseer/ReplicaMutator.java     |   12 +-
 .../solr/cloud/overseer/SliceMutator.java       |   13 +-
 .../solr/handler/admin/CollectionsHandler.java  |   24 +-
 .../TimeRoutedAliasUpdateProcessor.java         |    2 +-
 .../AbstractCloudBackupRestoreTestCase.java     |  346 ------
 .../test/org/apache/solr/cloud/AssignTest.java  |  155 ---
 .../solr/cloud/BasicDistributedZkTest.java      |    1 +
 .../solr/cloud/ChaosMonkeyShardSplitTest.java   |    5 +
 .../apache/solr/cloud/CollectionReloadTest.java |   84 --
 .../cloud/CollectionTooManyReplicasTest.java    |  221 ----
 .../CollectionsAPIAsyncDistributedZkTest.java   |  177 ---
 .../cloud/CollectionsAPIDistributedZkTest.java  |  684 ------------
 ...ConcurrentDeleteAndCreateCollectionTest.java |  226 ----
 .../apache/solr/cloud/CustomCollectionTest.java |  198 ----
 ...verseerCollectionConfigSetProcessorTest.java |   28 +-
 .../solr/cloud/OverseerTaskQueueTest.java       |    1 +
 .../solr/cloud/ReplicaPropertiesBase.java       |  177 ---
 .../org/apache/solr/cloud/ShardSplitTest.java   | 1015 -----------------
 .../cloud/SimpleCollectionCreateDeleteTest.java |   64 --
 .../apache/solr/cloud/TestCollectionAPI.java    |  797 --------------
 .../TestCollectionsAPIViaSolrCloudCluster.java  |  295 -----
 .../solr/cloud/TestHdfsCloudBackupRestore.java  |  203 ----
 .../cloud/TestLocalFSCloudBackupRestore.java    |   57 -
 .../solr/cloud/TestReplicaProperties.java       |  236 ----
 .../cloud/TestRequestStatusCollectionAPI.java   |  197 ----
 .../AbstractCloudBackupRestoreTestCase.java     |  348 ++++++
 .../solr/cloud/api/collections/AssignTest.java  |  156 +++
 .../api/collections/CollectionReloadTest.java   |   85 ++
 .../CollectionTooManyReplicasTest.java          |  222 ++++
 .../CollectionsAPIAsyncDistributedZkTest.java   |  178 +++
 .../CollectionsAPIDistributedZkTest.java        |  686 ++++++++++++
 ...ConcurrentDeleteAndCreateCollectionTest.java |  227 ++++
 .../api/collections/CustomCollectionTest.java   |  199 ++++
 .../HdfsCollectionsAPIDistributedZkTest.java    |  176 +++
 .../api/collections/ReplicaPropertiesBase.java  |  178 +++
 .../cloud/api/collections/ShardSplitTest.java   | 1017 ++++++++++++++++++
 .../SimpleCollectionCreateDeleteTest.java       |   66 ++
 .../api/collections/TestCollectionAPI.java      |  795 ++++++++++++++
 .../TestCollectionsAPIViaSolrCloudCluster.java  |  297 +++++
 .../collections/TestHdfsCloudBackupRestore.java |  207 ++++
 .../TestLocalFSCloudBackupRestore.java          |   57 +
 .../api/collections/TestReplicaProperties.java  |  236 ++++
 .../TestRequestStatusCollectionAPI.java         |  198 ++++
 .../cloud/autoscaling/sim/SimCloudManager.java  |    2 +-
 .../sim/SimClusterStateProvider.java            |   14 +-
 .../cloud/cdcr/BaseCdcrDistributedZkTest.java   |   11 +-
 .../HdfsCollectionsAPIDistributedZkTest.java    |  176 ---
 .../cloud/AbstractFullDistribZkTestBase.java    |   40 +-
 .../apache/solr/cloud/MiniSolrCloudCluster.java |    2 +-
 .../solr/cloud/MiniSolrCloudClusterTest.java    |    2 +-
 106 files changed, 11761 insertions(+), 11685 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 45a9a59..187976d 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -150,6 +150,8 @@ Other Changes
 
 * SOLR-11218: Fail and return an error when attempting to delete a collection that's part of an alias (Erick Erickson)
 
+* SOLR-11817: Move Collections API classes to their own package (Varun Thacker)
+
 ==================  7.2.1 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
deleted file mode 100644
index 71a54c14..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonAdminParams.TIMEOUT;
-import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
-
-public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public AddReplicaCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    addReplica(state, message, results, null);
-  }
-
-  ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-      throws IOException, InterruptedException {
-    log.debug("addReplica() : {}", Utils.toJSONString(message));
-    boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
-    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
-    final String asyncId = message.getStr(ASYNC);
-
-    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-    message = assignReplicaDetails(ocmh.cloudManager, clusterState, message, sessionWrapper);
-
-    String collection = message.getStr(COLLECTION_PROP);
-    DocCollection coll = clusterState.getCollection(collection);
-
-    String node = message.getStr(CoreAdminParams.NODE);
-    String shard = message.getStr(SHARD_ID_PROP);
-    String coreName = message.getStr(CoreAdminParams.NAME);
-    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
-    int timeout = message.getInt(TIMEOUT, 10 * 60); // 10 minutes
-    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
-    boolean parallel = message.getBool("parallel", false);
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    if (!Overseer.isLegacy(zkStateReader)) {
-      if (!skipCreateReplicaInClusterState) {
-        ZkNodeProps props = new ZkNodeProps(
-            Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
-            ZkStateReader.COLLECTION_PROP, collection,
-            ZkStateReader.SHARD_ID_PROP, shard,
-            ZkStateReader.CORE_NAME_PROP, coreName,
-            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(node),
-            ZkStateReader.NODE_NAME_PROP, node,
-            ZkStateReader.REPLICA_TYPE, replicaType.name());
-        if (coreNodeName != null) {
-          props = props.plus(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
-        }
-        try {
-          Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
-        } catch (Exception e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Exception updating Overseer state queue", e);
-        }
-      }
-      params.set(CoreAdminParams.CORE_NODE_NAME,
-          ocmh.waitToSeeReplicasInState(collection, Collections.singletonList(coreName)).get(coreName).getName());
-    }
-
-    String configName = zkStateReader.readConfigName(collection);
-    String routeKey = message.getStr(ShardParams._ROUTE_);
-    String dataDir = message.getStr(CoreAdminParams.DATA_DIR);
-    String ulogDir = message.getStr(CoreAdminParams.ULOG_DIR);
-    String instanceDir = message.getStr(CoreAdminParams.INSTANCE_DIR);
-
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
-    params.set(CoreAdminParams.NAME, coreName);
-    params.set(COLL_CONF, configName);
-    params.set(CoreAdminParams.COLLECTION, collection);
-    params.set(CoreAdminParams.REPLICA_TYPE, replicaType.name());
-    if (shard != null) {
-      params.set(CoreAdminParams.SHARD, shard);
-    } else if (routeKey != null) {
-      Collection<Slice> slices = coll.getRouter().getSearchSlicesSingle(routeKey, null, coll);
-      if (slices.isEmpty()) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No active shard serving _route_=" + routeKey + " found");
-      } else {
-        params.set(CoreAdminParams.SHARD, slices.iterator().next().getName());
-      }
-    } else {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specify either 'shard' or _route_ param");
-    }
-    if (dataDir != null) {
-      params.set(CoreAdminParams.DATA_DIR, dataDir);
-    }
-    if (ulogDir != null) {
-      params.set(CoreAdminParams.ULOG_DIR, ulogDir);
-    }
-    if (instanceDir != null) {
-      params.set(CoreAdminParams.INSTANCE_DIR, instanceDir);
-    }
-    if (coreNodeName != null) {
-      params.set(CoreAdminParams.CORE_NODE_NAME, coreNodeName);
-    }
-    ocmh.addPropertyParams(message, params);
-
-    // For tracking async calls.
-    Map<String,String> requestMap = new HashMap<>();
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-
-    ocmh.sendShardRequest(node, params, shardHandler, asyncId, requestMap);
-
-    final String fnode = node;
-    final String fcoreName = coreName;
-
-    Runnable runnable = () -> {
-      ocmh.processResponses(results, shardHandler, true, "ADDREPLICA failed to create replica", asyncId, requestMap);
-      ocmh.waitForCoreNodeName(collection, fnode, fcoreName);
-      if (sessionWrapper.get() != null) {
-        sessionWrapper.get().release();
-      }
-      if (onComplete != null) onComplete.run();
-    };
-
-    if (!parallel || waitForFinalState) {
-      if (waitForFinalState) {
-        SolrCloseableLatch latch = new SolrCloseableLatch(1, ocmh);
-        ActiveReplicaWatcher watcher = new ActiveReplicaWatcher(collection, null, Collections.singletonList(coreName), latch);
-        try {
-          zkStateReader.registerCollectionStateWatcher(collection, watcher);
-          runnable.run();
-          if (!latch.await(timeout, TimeUnit.SECONDS)) {
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Timeout waiting " + timeout + " seconds for replica to become active.");
-          }
-        } finally {
-          zkStateReader.removeCollectionStateWatcher(collection, watcher);
-        }
-      } else {
-        runnable.run();
-      }
-    } else {
-      ocmh.tpe.submit(runnable);
-    }
-
-
-    return new ZkNodeProps(
-        ZkStateReader.COLLECTION_PROP, collection,
-        ZkStateReader.SHARD_ID_PROP, shard,
-        ZkStateReader.CORE_NAME_PROP, coreName,
-        ZkStateReader.NODE_NAME_PROP, node
-    );
-  }
-
-  public static ZkNodeProps assignReplicaDetails(SolrCloudManager cloudManager, ClusterState clusterState,
-                                                 ZkNodeProps message, AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
-    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
-
-    String collection = message.getStr(COLLECTION_PROP);
-    String node = message.getStr(CoreAdminParams.NODE);
-    String shard = message.getStr(SHARD_ID_PROP);
-    String coreName = message.getStr(CoreAdminParams.NAME);
-    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
-    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
-    if (StringUtils.isBlank(coreName)) {
-      coreName = message.getStr(CoreAdminParams.PROPERTY_PREFIX + CoreAdminParams.NAME);
-    }
-
-    DocCollection coll = clusterState.getCollection(collection);
-    if (coll == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
-    }
-    if (coll.getSlice(shard) == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Collection: " + collection + " shard: " + shard + " does not exist");
-    }
-
-    // Kind of unnecessary, but it does put the logic of whether to override maxShardsPerNode in one place.
-    if (!skipCreateReplicaInClusterState) {
-      if (CloudUtil.usePolicyFramework(coll, cloudManager)) {
-        if (node == null) {
-          if(coll.getPolicyName() != null) message.getProperties().put(Policy.POLICY, coll.getPolicyName());
-          node = Assign.identifyNodes(cloudManager,
-              clusterState,
-              Collections.emptyList(),
-              collection,
-              message,
-              Collections.singletonList(shard),
-              replicaType == Replica.Type.NRT ? 0 : 1,
-              replicaType == Replica.Type.TLOG ? 0 : 1,
-              replicaType == Replica.Type.PULL ? 0 : 1
-          ).get(0).node;
-          sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
-        }
-      } else {
-        node = Assign.getNodesForNewReplicas(clusterState, collection, shard, 1, node,
-            cloudManager).get(0).nodeName;// TODO: use replica type in this logic too
-      }
-    }
-    log.info("Node Identified {} for creating new replica", node);
-
-    if (!clusterState.liveNodesContain(node)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
-    }
-    if (coreName == null) {
-      coreName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), coll, shard, replicaType);
-    } else if (!skipCreateReplicaInClusterState) {
-      //Validate that the core name is unique in that collection
-      for (Slice slice : coll.getSlices()) {
-        for (Replica replica : slice.getReplicas()) {
-          String replicaCoreName = replica.getStr(CORE_NAME_PROP);
-          if (coreName.equals(replicaCoreName)) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Another replica with the same core name already exists" +
-                " for this collection");
-          }
-        }
-      }
-    }
-    if (coreNodeName != null) {
-      message = message.plus(CoreAdminParams.CORE_NODE_NAME, coreNodeName);
-    }
-    message = message.plus(CoreAdminParams.NAME, coreName);
-    message = message.plus(CoreAdminParams.NODE, node);
-    return message;
-  }
-}

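For context, AddReplicaCmd (removed above and re-added under org.apache.solr.cloud.api.collections) backs the
ADDREPLICA Collections API action. Below is a minimal SolrJ sketch of invoking it; the ZooKeeper address and the
collection, shard, and node names are illustrative assumptions, not values from this patch:

    import java.util.Collections;
    import java.util.Optional;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    // Build a client against the cluster's ZooKeeper ensemble (address is an assumption).
    CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("localhost:9983"), Optional.empty()).build();
    // ADDREPLICA routes through the Overseer to the command shown above.
    CollectionAdminRequest.AddReplica addReplica =
        CollectionAdminRequest.addReplicaToShard("myColl", "shard1")
            .setNode("127.0.0.1:8983_solr"); // optional; when omitted, Assign picks a node
    addReplica.process(client);
    client.close();
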
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Assign.java b/solr/core/src/java/org/apache/solr/cloud/Assign.java
deleted file mode 100644
index c746c94..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/Assign.java
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
-import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.rule.ReplicaAssigner;
-import org.apache.solr.cloud.rule.Rule;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.NumberUtils;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE_DEFAULT;
-import static org.apache.solr.common.cloud.DocCollection.SNITCH;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-
-public class Assign {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static int incAndGetId(DistribStateManager stateManager, String collection, int defaultValue) {
-    String path = "/collections/"+collection;
-    try {
-      if (!stateManager.hasData(path)) {
-        try {
-          stateManager.makePath(path);
-        } catch (AlreadyExistsException e) {
-          // it's okay if another beats us creating the node
-        }
-      }
-      path += "/counter";
-      if (!stateManager.hasData(path)) {
-        try {
-          stateManager.createData(path, NumberUtils.intToBytes(defaultValue), CreateMode.PERSISTENT);
-        } catch (AlreadyExistsException e) {
-          // it's okay if another beats us creating the node
-        }
-      }
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt(); // restore the interrupt status; Thread.interrupted() would clear it
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
-    } catch (IOException | KeeperException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
-    }
-
-    while (true) {
-      try {
-        int version = 0;
-        int currentId = 0;
-        VersionedData data = stateManager.getData(path, null);
-        if (data != null) {
-          currentId = NumberUtils.bytesToInt(data.getData());
-          version = data.getVersion();
-        }
-        byte[] bytes = NumberUtils.intToBytes(++currentId);
-        stateManager.setData(path, bytes, version);
-        return currentId;
-      } catch (BadVersionException e) {
-        continue;
-      } catch (IOException | KeeperException e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:"+collection, e);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt(); // restore the interrupt status; Thread.interrupted() would clear it
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:" + collection, e);
-      }
-    }
-  }
-
-  public static String assignCoreNodeName(DistribStateManager stateManager, DocCollection collection) {
-    // for backward compatibility;
-    int defaultValue = defaultCounterValue(collection, false);
-    String coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
-    while (collection.getReplica(coreNodeName) != null) {
-      // there is a wee chance that the new coreNodeName is not totally unique,
-      // but it will be guaranteed unique for new collections
-      coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
-    }
-    return coreNodeName;
-  }
-
-  /**
-   * Assign a new unique shard id while the slice count is below numShards; after that, pick the shard with the fewest replicas.
-   *
-   * @return the assigned shard id
-   */
-  public static String assignShard(DocCollection collection, Integer numShards) {
-    if (numShards == null) {
-      numShards = 1;
-    }
-    String returnShardId = null;
-    Map<String, Slice> sliceMap = collection != null ? collection.getActiveSlicesMap() : null;
-
-
-    // TODO: now that we create shards ahead of time, is this code needed?  Esp since hash ranges aren't assigned when creating via this method?
-
-    if (sliceMap == null) {
-      return "shard1";
-    }
-
-    List<String> shardIdNames = new ArrayList<>(sliceMap.keySet());
-
-    if (shardIdNames.size() < numShards) {
-      return "shard" + (shardIdNames.size() + 1);
-    }
-
-    // TODO: don't need to sort to find shard with fewest replicas!
-
-    // else figure out which shard needs more replicas
-    final Map<String, Integer> map = new HashMap<>();
-    for (String shardId : shardIdNames) {
-      int cnt = sliceMap.get(shardId).getReplicasMap().size();
-      map.put(shardId, cnt);
-    }
-
-    Collections.sort(shardIdNames, (String o1, String o2) -> {
-      Integer one = map.get(o1);
-      Integer two = map.get(o2);
-      return one.compareTo(two);
-    });
-
-    returnShardId = shardIdNames.get(0);
-    return returnShardId;
-  }
-
-  private static String buildSolrCoreName(String collectionName, String shard, Replica.Type type, int replicaNum) {
-    // TODO: Adding the suffix is great for debugging, but may be an issue if at some point we want to support a way to change replica type
-    return String.format(Locale.ROOT, "%s_%s_replica_%s%s", collectionName, shard, type.name().substring(0,1).toLowerCase(Locale.ROOT), replicaNum);
-  }
-
-  private static int defaultCounterValue(DocCollection collection, boolean newCollection) {
-    if (newCollection) return 0;
-    int defaultValue = collection.getReplicas().size();
-    if (collection.getReplicationFactor() != null) {
-      // numReplicas and replicationFactor * numSlices may not be equal
-      // when many addReplica or deleteReplica operations have been executed
-      defaultValue = Math.max(defaultValue,
-          collection.getReplicationFactor() * collection.getSlices().size());
-    }
-    return defaultValue * 20;
-  }
-
-  public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type, boolean newCollection) {
-    Slice slice = collection.getSlice(shard);
-    int defaultValue = defaultCounterValue(collection, newCollection);
-    int replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
-    String coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum);
-    while (existCoreName(coreName, slice)) {
-      replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
-      coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum);
-    }
-    return coreName;
-  }
-
-  public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type) {
-    return buildSolrCoreName(stateManager, collection, shard, type, false);
-  }
-
-  private static boolean existCoreName(String coreName, Slice slice) {
-    if (slice == null) return false;
-    for (Replica replica : slice.getReplicas()) {
-      if (coreName.equals(replica.getStr(CORE_NAME_PROP))) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  public static List<String> getLiveOrLiveAndCreateNodeSetList(final Set<String> liveNodes, final ZkNodeProps message, final Random random) {
-    // TODO: add smarter options that look at the current number of cores per
-    // node?
-    // for now we just go random (except when createNodeSet and createNodeSet.shuffle=false are passed in)
-
-    List<String> nodeList;
-
-    final String createNodeSetStr = message.getStr(CREATE_NODE_SET);
-    final List<String> createNodeList = (createNodeSetStr == null) ? null : StrUtils.splitSmart((CREATE_NODE_SET_EMPTY.equals(createNodeSetStr) ? "" : createNodeSetStr), ",", true);
-
-    if (createNodeList != null) {
-      nodeList = new ArrayList<>(createNodeList);
-      nodeList.retainAll(liveNodes);
-      if (message.getBool(CREATE_NODE_SET_SHUFFLE, CREATE_NODE_SET_SHUFFLE_DEFAULT)) {
-        Collections.shuffle(nodeList, random);
-      }
-    } else {
-      nodeList = new ArrayList<>(liveNodes);
-      Collections.shuffle(nodeList, random);
-    }
-
-    return nodeList;
-  }
-
-  public static List<ReplicaPosition> identifyNodes(SolrCloudManager cloudManager,
-                                                    ClusterState clusterState,
-                                                    List<String> nodeList,
-                                                    String collectionName,
-                                                    ZkNodeProps message,
-                                                    List<String> shardNames,
-                                                    int numNrtReplicas,
-                                                    int numTlogReplicas,
-                                                    int numPullReplicas) throws IOException, InterruptedException {
-    List<Map> rulesMap = (List) message.get("rule");
-    String policyName = message.getStr(POLICY);
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-
-    if (rulesMap == null && policyName == null && autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
-      log.debug("Identify nodes using default");
-      int i = 0;
-      List<ReplicaPosition> result = new ArrayList<>();
-      for (String aShard : shardNames)
-        for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
-            Replica.Type.TLOG, numTlogReplicas,
-            Replica.Type.PULL, numPullReplicas
-        ).entrySet()) {
-          for (int j = 0; j < e.getValue(); j++){
-            result.add(new ReplicaPosition(aShard, j, e.getKey(), nodeList.get(i % nodeList.size())));
-            i++;
-          }
-        }
-      return result;
-    } else {
-      if (numTlogReplicas + numPullReplicas != 0 && rulesMap != null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            Replica.Type.TLOG + " or " + Replica.Type.PULL + " replica types not supported with placement rules or cluster policies");
-      }
-    }
-
-    if (rulesMap != null && !rulesMap.isEmpty()) {
-      List<Rule> rules = new ArrayList<>();
-      for (Object map : rulesMap) rules.add(new Rule((Map) map));
-      Map<String, Integer> sharVsReplicaCount = new HashMap<>();
-
-      for (String shard : shardNames) sharVsReplicaCount.put(shard, numNrtReplicas);
-      ReplicaAssigner replicaAssigner = new ReplicaAssigner(rules,
-          sharVsReplicaCount,
-          (List<Map>) message.get(SNITCH),
-          new HashMap<>(),//this is a new collection. So, there are no nodes in any shard
-          nodeList,
-          cloudManager,
-          clusterState);
-
-      Map<ReplicaPosition, String> nodeMappings = replicaAssigner.getNodeMappings();
-      return nodeMappings.entrySet().stream()
-          .map(e -> new ReplicaPosition(e.getKey().shard, e.getKey().index, e.getKey().type, e.getValue()))
-          .collect(Collectors.toList());
-    } else  {
-      if (message.getStr(CREATE_NODE_SET) == null)
-        nodeList = Collections.emptyList(); // unless explicitly specified, do not pass the node list to Policy
-      return getPositionsUsingPolicy(collectionName,
-          shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas, policyName, cloudManager, nodeList);
-    }
-  }
-
-  static class ReplicaCount {
-    public final String nodeName;
-    public int thisCollectionNodes = 0;
-    public int totalNodes = 0;
-
-    ReplicaCount(String nodeName) {
-      this.nodeName = nodeName;
-    }
-
-    public int weight() {
-      return (thisCollectionNodes * 100) + totalNodes;
-    }
-  }
-
-  // Only called from createShard and addReplica (so far).
-  //
-  // Gets a list of candidate nodes to put the required replica(s) on. Throws errors if not enough replicas
-  // could be created on live nodes given maxShardsPerNode, replication factor (if from createShard), etc.
-  public static List<ReplicaCount> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
-                                                          String shard, int nrtReplicas,
-                                                          Object createNodeSet, SolrCloudManager cloudManager) throws IOException, InterruptedException {
-    log.debug("getNodesForNewReplicas() shard: {} , replicas : {} , createNodeSet {}", shard, nrtReplicas, createNodeSet );
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
-    List<String> createNodeList = null;
-
-    if (createNodeSet instanceof List) {
-      createNodeList = (List) createNodeSet;
-    } else {
-      createNodeList = createNodeSet == null ? null : StrUtils.splitSmart((String) createNodeSet, ",", true);
-    }
-
-     HashMap<String, ReplicaCount> nodeNameVsShardCount = getNodeNameVsShardCount(collectionName, clusterState, createNodeList);
-
-    if (createNodeList == null) { // We only care if we haven't been told to put new replicas on specific nodes.
-      int availableSlots = 0;
-      for (Map.Entry<String, ReplicaCount> ent : nodeNameVsShardCount.entrySet()) {
-        //ADDREPLICA can put more than maxShardsPerNode on an instance, so this test is necessary.
-        if (maxShardsPerNode > ent.getValue().thisCollectionNodes) {
-          availableSlots += (maxShardsPerNode - ent.getValue().thisCollectionNodes);
-        }
-      }
-      if (availableSlots < nrtReplicas) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            String.format(Locale.ROOT, "Cannot create %d new replicas for collection %s given the current number of live nodes and a maxShardsPerNode of %d",
-                nrtReplicas, collectionName, maxShardsPerNode));
-      }
-    }
-
-    List l = (List) coll.get(DocCollection.RULE);
-    List<ReplicaPosition> replicaPositions = null;
-    if (l != null) {
-      // TODO: make it so that this method doesn't require access to CC
-      replicaPositions = getNodesViaRules(clusterState, shard, nrtReplicas, cloudManager, coll, createNodeList, l);
-    }
-    String policyName = coll.getStr(POLICY);
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    if (policyName != null || !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
-      replicaPositions = Assign.getPositionsUsingPolicy(collectionName, Collections.singletonList(shard), nrtReplicas, 0, 0,
-          policyName, cloudManager, createNodeList);
-    }
-
-    if(replicaPositions != null){
-      List<ReplicaCount> repCounts = new ArrayList<>();
-      for (ReplicaPosition p : replicaPositions) {
-        repCounts.add(new ReplicaCount(p.node));
-      }
-      return repCounts;
-    }
-
-    ArrayList<ReplicaCount> sortedNodeList = new ArrayList<>(nodeNameVsShardCount.values());
-    Collections.sort(sortedNodeList, (x, y) -> (x.weight() < y.weight()) ? -1 : ((x.weight() == y.weight()) ? 0 : 1));
-    return sortedNodeList;
-
-  }
-
-  public static List<ReplicaPosition> getPositionsUsingPolicy(String collName, List<String> shardNames,
-                                                              int nrtReplicas,
-                                                              int tlogReplicas,
-                                                              int pullReplicas,
-                                                              String policyName, SolrCloudManager cloudManager,
-                                                              List<String> nodesList) throws IOException, InterruptedException {
-    log.debug("shardnames {} NRT {} TLOG {} PULL {} , policy {}, nodeList {}", shardNames, nrtReplicas, tlogReplicas, pullReplicas, policyName, nodesList);
-    List<ReplicaPosition> replicaPositions = null;
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    try {
-      Map<String, String> kvMap = Collections.singletonMap(collName, policyName);
-      replicaPositions = PolicyHelper.getReplicaLocations(
-          collName,
-          autoScalingConfig,
-          cloudManager,
-          kvMap,
-          shardNames,
-          nrtReplicas,
-          tlogReplicas,
-          pullReplicas,
-          nodesList);
-      return replicaPositions;
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error getting replica locations", e);
-    } finally {
-      if (log.isTraceEnabled()) {
-        if (replicaPositions != null)
-          log.trace("REPLICA_POSITIONS: " + Utils.toJSONString(Utils.getDeepCopy(replicaPositions, 7, true)));
-        log.trace("AUTOSCALING_CONF: " + Utils.toJSONString(autoScalingConfig));
-      }
-    }
-  }
-
-  private static List<ReplicaPosition> getNodesViaRules(ClusterState clusterState, String shard, int numberOfNodes,
-                                                        SolrCloudManager cloudManager, DocCollection coll, List<String> createNodeList, List l) {
-    ArrayList<Rule> rules = new ArrayList<>();
-    for (Object o : l) rules.add(new Rule((Map) o));
-    Map<String, Map<String, Integer>> shardVsNodes = new LinkedHashMap<>();
-    for (Slice slice : coll.getSlices()) {
-      LinkedHashMap<String, Integer> n = new LinkedHashMap<>();
-      shardVsNodes.put(slice.getName(), n);
-      for (Replica replica : slice.getReplicas()) {
-        Integer count = n.get(replica.getNodeName());
-        if (count == null) count = 0;
-        n.put(replica.getNodeName(), ++count);
-      }
-    }
-    List snitches = (List) coll.get(SNITCH);
-    List<String> nodesList = createNodeList == null ?
-        new ArrayList<>(clusterState.getLiveNodes()) :
-        createNodeList;
-    Map<ReplicaPosition, String> positions = new ReplicaAssigner(
-        rules,
-        Collections.singletonMap(shard, numberOfNodes),
-        snitches,
-        shardVsNodes,
-        nodesList, cloudManager, clusterState).getNodeMappings();
-
-    return positions.entrySet().stream().map(e -> e.getKey().setNode(e.getValue())).collect(Collectors.toList());// getReplicaCounts(positions);
-  }
-
-  private static HashMap<String, ReplicaCount> getNodeNameVsShardCount(String collectionName,
-                                                                       ClusterState clusterState, List<String> createNodeList) {
-    Set<String> nodes = clusterState.getLiveNodes();
-
-    List<String> nodeList = new ArrayList<>(nodes.size());
-    nodeList.addAll(nodes);
-    if (createNodeList != null) nodeList.retainAll(createNodeList);
-
-    HashMap<String, ReplicaCount> nodeNameVsShardCount = new HashMap<>();
-    for (String s : nodeList) {
-      nodeNameVsShardCount.put(s, new ReplicaCount(s));
-    }
-    if (createNodeList != null) { // Overrides petty considerations about maxShardsPerNode
-      if (createNodeList.size() != nodeNameVsShardCount.size()) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "At least one of the node(s) specified " + createNodeList + " are not currently active in "
-                + nodeNameVsShardCount.keySet() + ", no action taken.");
-      }
-      return nodeNameVsShardCount;
-    }
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
-    Map<String, DocCollection> collections = clusterState.getCollectionsMap();
-    for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
-      DocCollection c = entry.getValue();
-      // identify suitable nodes by checking the number of cores on each of them
-      for (Slice slice : c.getSlices()) {
-        Collection<Replica> replicas = slice.getReplicas();
-        for (Replica replica : replicas) {
-          ReplicaCount count = nodeNameVsShardCount.get(replica.getNodeName());
-          if (count != null) {
-            count.totalNodes++; // Used to "weigh" whether this node should be used later.
-            if (entry.getKey().equals(collectionName)) {
-              count.thisCollectionNodes++;
-              if (count.thisCollectionNodes >= maxShardsPerNode) nodeNameVsShardCount.remove(replica.getNodeName());
-            }
-          }
-        }
-      }
-    }
-
-    return nodeNameVsShardCount;
-  }
-
-
-}

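As a quick illustration of the naming scheme in buildSolrCoreName above (all values made up), its private
format string "%s_%s_replica_%s%s" combines the collection, shard, a one-letter replica type, and the counter:

    import java.util.Locale;

    // NRT -> "n", TLOG -> "t", PULL -> "p" (first letter of the type, lower-cased)
    String name = String.format(Locale.ROOT, "%s_%s_replica_%s%s",
        "myColl", "shard1", "n", 42);  // -> "myColl_shard1_replica_n42"
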
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java b/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
deleted file mode 100644
index a4012f0..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.time.Instant;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Properties;
-
-import org.apache.lucene.util.Version;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Replica.State;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.backup.BackupManager;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public BackupCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String backupName = message.getStr(NAME);
-    String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
-
-    Instant startTime = Instant.now();
-
-    CoreContainer cc = ocmh.overseer.getZkController().getCoreContainer();
-    BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
-    BackupManager backupMgr = new BackupManager(repository, ocmh.zkStateReader);
-
-    // Backup location
-    URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
-    URI backupPath = repository.resolve(location, backupName);
-
-    // Validate that the backup directory does not already exist.
-    if (repository.exists(backupPath)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The backup directory already exists: " + backupPath);
-    }
-
-    // Create a directory to store backup details.
-    repository.createDirectory(backupPath);
-
-    String strategy = message.getStr(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
-    switch (strategy) {
-      case CollectionAdminParams.COPY_FILES_STRATEGY: {
-        copyIndexFiles(backupPath, message, results);
-        break;
-      }
-      case CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY: {
-        break;
-      }
-    }
-
-    log.info("Starting to backup ZK data for backupName={}", backupName);
-
-    //Download the configs
-    String configName = ocmh.zkStateReader.readConfigName(collectionName);
-    backupMgr.downloadConfigDir(location, backupName, configName);
-
-    // Save the collection's state. It can be part of the monolithic clusterstate.json or an individual state.json.
-    // Since we don't want to distinguish between the two, we extract the state and back it up as a separate JSON file.
-    DocCollection collectionState = ocmh.zkStateReader.getClusterState().getCollection(collectionName);
-    backupMgr.writeCollectionState(location, backupName, collectionName, collectionState);
-
-    Properties properties = new Properties();
-
-    properties.put(BackupManager.BACKUP_NAME_PROP, backupName);
-    properties.put(BackupManager.COLLECTION_NAME_PROP, collectionName);
-    properties.put(COLL_CONF, configName);
-    properties.put(BackupManager.START_TIME_PROP, startTime.toString());
-    properties.put(BackupManager.INDEX_VERSION_PROP, Version.LATEST.toString());
-    //TODO: Add an MD5 of the configset. If a configset with the same name exists during restore, we can compare checksums to see if they are the same.
-    //If they are not the same, we can throw an error or add an 'overwriteConfig' flag.
-    //TODO save numDocs for the shardLeader. We can use it to sanity check the restore.
-
-    backupMgr.writeBackupProperties(location, backupName, properties);
-
-    log.info("Completed backing up ZK data for backupName={}", backupName);
-  }
-
-  private Replica selectReplicaWithSnapshot(CollectionSnapshotMetaData snapshotMeta, Slice slice) {
-    // The goal here is to choose the snapshot of the replica which was the leader at the time snapshot was created.
-    // If that is not possible, we choose any other replica for the given shard.
-    Collection<CoreSnapshotMetaData> snapshots = snapshotMeta.getReplicaSnapshotsForShard(slice.getName());
-
-    Optional<CoreSnapshotMetaData> leaderCore = snapshots.stream().filter(x -> x.isLeader()).findFirst();
-    if (leaderCore.isPresent()) {
-      log.info("Replica {} was the leader when snapshot {} was created.", leaderCore.get().getCoreName(), snapshotMeta.getName());
-      Replica r = slice.getReplica(leaderCore.get().getCoreName());
-      if ((r != null) && !r.getState().equals(State.DOWN)) {
-        return r;
-      }
-    }
-
-    Optional<Replica> r = slice.getReplicas().stream()
-                               .filter(x -> x.getState() != State.DOWN && snapshotMeta.isSnapshotExists(slice.getName(), x))
-                               .findFirst();
-
-    if (!r.isPresent()) {
-      throw new SolrException(ErrorCode.SERVER_ERROR,
-          "Unable to find any live replica with a snapshot named " + snapshotMeta.getName() + " for shard " + slice.getName());
-    }
-
-    return r.get();
-  }
-
-  private void copyIndexFiles(URI backupPath, ZkNodeProps request, NamedList results) throws Exception {
-    String collectionName = request.getStr(COLLECTION_PROP);
-    String backupName = request.getStr(NAME);
-    String asyncId = request.getStr(ASYNC);
-    String repoName = request.getStr(CoreAdminParams.BACKUP_REPOSITORY);
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    Map<String, String> requestMap = new HashMap<>();
-
-    String commitName = request.getStr(CoreAdminParams.COMMIT_NAME);
-    Optional<CollectionSnapshotMetaData> snapshotMeta = Optional.empty();
-    if (commitName != null) {
-      SolrZkClient zkClient = ocmh.overseer.getZkController().getZkClient();
-      snapshotMeta = SolrSnapshotManager.getCollectionLevelSnapshot(zkClient, collectionName, commitName);
-      if (!snapshotMeta.isPresent()) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName
-            + " does not exist for collection " + collectionName);
-      }
-      if (snapshotMeta.get().getStatus() != SnapshotStatus.Successful) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName + " for collection " + collectionName
-            + " has not completed successfully. The status is " + snapshotMeta.get().getStatus());
-      }
-    }
-
-    log.info("Starting backup of collection={} with backupName={} at location={}", collectionName, backupName,
-        backupPath);
-
-    Collection<String> shardsToConsider = Collections.emptySet();
-    if (snapshotMeta.isPresent()) {
-      shardsToConsider = snapshotMeta.get().getShards();
-    }
-
-    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getActiveSlices()) {
-      Replica replica = null;
-
-      if (snapshotMeta.isPresent()) {
-        if (!shardsToConsider.contains(slice.getName())) {
-          log.warn("Skipping the backup for shard {} since it wasn't part of the collection {} when snapshot {} was created.",
-              slice.getName(), collectionName, snapshotMeta.get().getName());
-          continue;
-        }
-        replica = selectReplicaWithSnapshot(snapshotMeta.get(), slice);
-      } else {
-        // Note: this can return null when there is no leader for this shard.
-        replica = slice.getLeader();
-        if (replica == null) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "No 'leader' replica available for shard " + slice.getName() + " of collection " + collectionName);
-        }
-      }
-
-      String coreName = replica.getStr(CORE_NAME_PROP);
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString());
-      params.set(NAME, slice.getName());
-      params.set(CoreAdminParams.BACKUP_REPOSITORY, repoName);
-      params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString()); // note: the index dir will be under here, named "snapshot." + slice name
-      params.set(CORE_NAME_PROP, coreName);
-      if (snapshotMeta.isPresent()) {
-        params.set(CoreAdminParams.COMMIT_NAME, snapshotMeta.get().getName());
-      }
-
-      ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
-      log.debug("Sent backup request to core={} for backupName={}", coreName, backupName);
-    }
-    log.debug("Sent backup requests to all shard leaders for backupName={}", backupName);
-
-    ocmh.processResponses(results, shardHandler, true, "Could not backup all replicas", asyncId, requestMap);
-  }
-}

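For context, BackupCmd backs the BACKUP Collections API action. A minimal SolrJ sketch follows; the collection,
backup name, and location are illustrative assumptions:

    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    // The location must be a path the configured BackupRepository can reach.
    CollectionAdminRequest.Backup backup =
        CollectionAdminRequest.backupCollection("myColl", "myBackup")
            .setLocation("/backups/solr");
    backup.process(client); // client as in the earlier ADDREPLICA sketch
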
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java b/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
index 3cdc903..9b16d23 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
@@ -18,6 +18,7 @@ package org.apache.solr.cloud;
 
 import java.lang.invoke.MethodHandles;
 
+import org.apache.solr.cloud.api.collections.CreateCollectionCmd;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.core.ConfigSetService;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
index 30de3d4..0d45129 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
@@ -132,7 +132,7 @@ public class CloudUtil {
 
   }
 
-  static boolean usePolicyFramework(DocCollection collection, SolrCloudManager cloudManager)
+  public static boolean usePolicyFramework(DocCollection collection, SolrCloudManager cloudManager)
       throws IOException, InterruptedException {
     AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
     return !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty() || collection.getPolicyName() != null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CreateAliasCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateAliasCmd.java
deleted file mode 100644
index e10d53e..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CreateAliasCmd.java
+++ /dev/null
@@ -1,101 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Locale;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-
-public class CreateAliasCmd implements Cmd {
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public CreateAliasCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results)
-      throws Exception {
-    final String aliasName = message.getStr(NAME);
-    final List<String> canonicalCollectionList = parseCollectionsParameter(message.get("collections"));
-    final String canonicalCollectionsString = StrUtils.join(canonicalCollectionList, ',');
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    validateAllCollectionsExistAndNoDups(canonicalCollectionList, zkStateReader);
-
-    zkStateReader.aliasesHolder.applyModificationAndExportToZk(aliases -> aliases.cloneWithCollectionAlias(aliasName, canonicalCollectionsString));
-
-    // Sleep a bit to allow ZooKeeper state propagation.
-    //
-    // THIS IS A KLUDGE.
-    //
-    // Solr's view of the cluster is eventually consistent. *Eventually* all nodes and CloudSolrClients will be aware of
-    // alias changes, but not immediately. If a newly created alias is queried, things should work right away since Solr
-    // will attempt to see if it needs to get the latest aliases when it can't otherwise resolve the name.  However
-    // modifications to an alias will take some time.
-    //
-    // We could levy this requirement on the client but they would probably always add an obligatory sleep, which is
-    // just kicking the can down the road.  Perhaps ideally at this juncture here we could somehow wait until all
-    // Solr nodes in the cluster have the latest aliases?
-    Thread.sleep(100);
-  }
-
-  private void validateAllCollectionsExistAndNoDups(List<String> collectionList, ZkStateReader zkStateReader) {
-    final String collectionStr = StrUtils.join(collectionList, ',');
-
-    if (new HashSet<>(collectionList).size() != collectionList.size()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', since it contains duplicates", collectionStr));
-    }
-    ClusterState clusterState = zkStateReader.getClusterState();
-    Set<String> aliasNames = zkStateReader.getAliases().getCollectionAliasListMap().keySet();
-    for (String collection : collectionList) {
-      if (clusterState.getCollectionOrNull(collection) == null && !aliasNames.contains(collection)) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', '%s' is not an existing collection or alias", collectionStr, collection));
-      }
-    }
-  }
-  
-  /**
-   * The v2 API directs that the 'collections' parameter be provided as a JSON array (e.g. ["a", "b"]).  We also
-   * maintain support for the legacy format, a comma-separated list (e.g. a,b).
-   */
-  @SuppressWarnings("unchecked")
-  private List<String> parseCollectionsParameter(Object colls) {
-    if (colls == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "missing collections param");
-    if (colls instanceof List) return (List<String>) colls;
-    return StrUtils.splitSmart(colls.toString(), ",", true).stream()
-        .map(String::trim)
-        .collect(Collectors.toList());
-  }
-
-}
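
A minimal sketch of the explicit wait that the KLUDGE comment above asks for, polling one
node's view of the aliases instead of sleeping a blind 100ms. The helper name and timeout
are illustrative; it uses only the ZkStateReader calls already seen in this file, and note
that it checks a single node's view, so verifying every node in the cluster would still
require polling each one:

  private boolean waitForAliasVisible(ZkStateReader zkStateReader, String aliasName, long timeoutMs)
      throws InterruptedException {
    final long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // Re-read this node's alias map and check whether the new alias has appeared yet.
      if (zkStateReader.getAliases().getCollectionAliasListMap().containsKey(aliasName)) {
        return true;
      }
      Thread.sleep(50); // brief back-off between checks
    }
    return false; // caller decides whether a timeout is fatal
  }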


[08/41] lucene-solr:jira/solr-11702: SOLR-11064: Collection APIs should use the disk space hint when using policy framework

Posted by da...@apache.org.
SOLR-11064: Collection APIs should use the disk space hint when using policy framework


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d99799c7
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d99799c7
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d99799c7

Branch: refs/heads/jira/solr-11702
Commit: d99799c75c79b488d4db26c968d4e1a9cf415c6b
Parents: 1c4b417
Author: Noble Paul <no...@apache.org>
Authored: Wed Jan 17 03:04:34 2018 +1100
Committer: Noble Paul <no...@apache.org>
Committed: Wed Jan 17 03:04:34 2018 +1100

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 +
 .../DelegatingClusterStateProvider.java         |  7 ++
 .../solrj/cloud/autoscaling/PolicyHelper.java   | 33 ++++++++
 .../client/solrj/impl/ClusterStateProvider.java |  7 +-
 .../solrj/cloud/autoscaling/TestPolicy.java     | 83 ++++++++++++++++++++
 5 files changed, 131 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d99799c7/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index ecdcca1..4fd3ff1 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -87,6 +87,8 @@ New Features
 * SOLR-3218: Added range faceting support for CurrencyFieldType.  This includes both "facet.range" as well
   as json.facet's "type:range" (Andrew Morrison, Jan Høydahl, Vitaliy Zhovtyuk, hossman)
 
+* SOLR-11064: Collection APIs should use the disk space hint when using policy framework  (noble)
+
 Bug Fixes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d99799c7/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/DelegatingClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/DelegatingClusterStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/DelegatingClusterStateProvider.java
index e512ab3..e0b9bac 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/DelegatingClusterStateProvider.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/DelegatingClusterStateProvider.java
@@ -24,6 +24,7 @@ import java.util.Set;
 
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
 
 /**
  * Base class for overriding some behavior of {@link ClusterStateProvider}
@@ -90,6 +91,12 @@ public class DelegatingClusterStateProvider implements ClusterStateProvider {
   }
 
   @Override
+  public DocCollection getCollection(String name) throws IOException {
+    ClusterState cs = getClusterState();
+    return cs == null ? null : cs.getCollectionOrNull(name);
+  }
+
+  @Override
   public void connect() {
     if (delegate != null) {
       delegate.connect();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d99799c7/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
index 489a9f9..ae7c9af 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
@@ -21,7 +21,9 @@ package org.apache.solr.client.solrj.cloud.autoscaling;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.EnumMap;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
@@ -32,6 +34,7 @@ import org.apache.solr.client.solrj.cloud.autoscaling.Suggester.Hint;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.common.MapWriter;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.ReplicaPosition;
 import org.apache.solr.common.util.Pair;
@@ -40,7 +43,10 @@ import org.apache.solr.common.util.Utils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.singletonList;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.solr.client.solrj.cloud.autoscaling.Suggestion.ConditionType.FREEDISK;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
 import static org.apache.solr.common.params.CoreAdminParams.NODE;
 import static org.apache.solr.common.util.Utils.time;
@@ -106,6 +112,30 @@ public class PolicyHelper {
 
       }
       session = sessionWrapper.session;
+      Map<String, Double> diskSpaceReqd = new HashMap<>();
+      try {
+        DocCollection coll = cloudManager.getClusterStateProvider().getCollection(collName);
+        if (coll != null) {
+          for (String shardName : shardNames) {
+            Replica ldr = coll.getLeader(shardName);
+            if (ldr != null) {
+              Map<String, Map<String, List<ReplicaInfo>>> details = cloudManager.getNodeStateProvider().getReplicaInfo(ldr.getNodeName(),
+                  Collections.singleton(FREEDISK.perReplicaValue));
+              ReplicaInfo replicaInfo = details.getOrDefault(collName, emptyMap()).getOrDefault(shardName, singletonList(null)).get(0);
+              if (replicaInfo != null) {
+                Object idxSz = replicaInfo.getVariables().get(FREEDISK.perReplicaValue);
+                if (idxSz != null) {
+                  diskSpaceReqd.put(shardName, 1.5 * (Double) Suggestion.ConditionType.FREEDISK.validate(null, idxSz, false));
+                }
+              }
+            }
+
+          }
+        }
+      } catch (IOException e) {
+        /* ignore: if cluster state can't be read, proceed without disk-space hints */
+      }
+
 
       Map<Replica.Type, Integer> typeVsCount = new EnumMap<>(Replica.Type.class);
       typeVsCount.put(Replica.Type.NRT, nrtReplicas);
@@ -123,6 +153,9 @@ public class PolicyHelper {
                 suggester = suggester.hint(Hint.TARGET_NODE, nodeName);
               }
             }
+            if (diskSpaceReqd.get(shardName) != null) {
+              suggester.hint(Hint.MINFREEDISK, diskSpaceReqd.get(shardName));
+            }
             SolrRequest op = suggester.getSuggestion();
             if (op == null) {
               throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No node can satisfy the rules " +
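
The two hunks above are the heart of the change: the leader's index size (read via
FREEDISK.perReplicaValue) is multiplied by 1.5 and handed to the suggester as a
Hint.MINFREEDISK, so only nodes with enough headroom are considered for a new replica.
Reduced to just that arithmetic, a self-contained sketch (class name and sizes are made
up; this is not Solr API):

  import java.util.HashMap;
  import java.util.Map;

  public class DiskSpaceHintSketch {
    // Same headroom factor as PolicyHelper above: room for a full copy of the
    // leader's index plus ~50% slack.
    static final double HEADROOM = 1.5;

    public static void main(String[] args) {
      Map<String, Double> leaderIndexSizeGb = new HashMap<>(); // hypothetical inputs
      leaderIndexSizeGb.put("shard1", 200.0);
      leaderIndexSizeGb.put("shard2", 120.0);

      // A 200GB leader index means only nodes with >= 300GB free are candidates.
      leaderIndexSizeGb.forEach((shard, size) ->
          System.out.println(shard + ": require >= " + (HEADROOM * size) + " GB free"));
    }
  }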

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d99799c7/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClusterStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClusterStateProvider.java
index 3041a13..c04b80d 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClusterStateProvider.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClusterStateProvider.java
@@ -17,12 +17,13 @@
 package org.apache.solr.client.solrj.impl;
 
 import java.io.IOException;
-import java.util.Map;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.solr.common.SolrCloseable;
 import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
 
 public interface ClusterStateProvider extends SolrCloseable {
 
@@ -48,6 +49,10 @@ public interface ClusterStateProvider extends SolrCloseable {
    */
   ClusterState getClusterState() throws IOException;
 
+  default DocCollection getCollection(String name) throws IOException {
+    return getClusterState().getCollectionOrNull(name);
+  }
+
   /**
    * Obtain cluster properties.
    * @return configured cluster properties, or an empty map, never null.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d99799c7/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
index 3a5caf9..2c119f3 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
@@ -40,6 +40,8 @@ import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
 import org.apache.solr.client.solrj.cloud.autoscaling.Suggester.Hint;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.ReplicaPosition;
 import org.apache.solr.common.cloud.ZkStateReader;
@@ -54,6 +56,7 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.solr.client.solrj.cloud.autoscaling.Suggestion.ConditionType.FREEDISK;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
 
@@ -1662,4 +1665,84 @@ public class TestPolicy extends SolrTestCaseJ4 {
     assertEquals("127.0.0.1:51147_solr" , op.getNode());
   }
 
+  public void testDiskSpaceReqd() {
+    String autoScaleJson = "{" +
+        "  cluster-preferences: [" +
+        "    { minimize : cores, precision: 2}" +
+        "  ]," +
+        "  cluster-policy: [" +
+        "    { replica : '0' , nodeRole: overseer}" +
+
+        "  ]" +
+        "}";
+
+
+    Map<String, Map> nodeValues = (Map<String, Map>) Utils.fromJSONString("{" +
+        "node1:{cores:12, freedisk: 334, heap:10480, sysprop.rack:rack3}," +
+        "node2:{cores:4, freedisk: 262, heap:6873, sysprop.fs : ssd, sysprop.rack:rack1}," +
+        "node3:{cores:7, freedisk: 749, heap:7834, sysprop.rack:rack4}," +
+        "node4:{cores:0, freedisk: 900, heap:16900, nodeRole:overseer, sysprop.rack:rack2}" +
+        "}");
+
+    SolrCloudManager cloudManager = new DelegatingCloudManager(null) {
+      @Override
+      public NodeStateProvider getNodeStateProvider() {
+        return new DelegatingNodeStateProvider(null) {
+          @Override
+          public Map<String, Object> getNodeValues(String node, Collection<String> keys) {
+            Map<String, Object> result = new LinkedHashMap<>();
+            keys.stream().forEach(s -> result.put(s, nodeValues.get(node).get(s)));
+            return result;
+          }
+
+          @Override
+          public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
+            if (node.equals("node1")) {
+              Map m = Utils.makeMap("newColl",
+                  Utils.makeMap("shard1", Collections.singletonList(new ReplicaInfo("r1", "shard1",
+                      new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node1")),
+                      Utils.makeMap(FREEDISK.perReplicaValue, 200)))));
+              return m;
+            } else if (node.equals("node2")) {
+              Map m = Utils.makeMap("newColl",
+                  Utils.makeMap("shard2", Collections.singletonList(new ReplicaInfo("r1", "shard2",
+                      new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node2")),
+                      Utils.makeMap(FREEDISK.perReplicaValue, 200)))));
+              return m;
+            }
+            return Collections.emptyMap();
+          }
+        };
+      }
+
+      @Override
+      public ClusterStateProvider getClusterStateProvider() {
+        return new DelegatingClusterStateProvider(null) {
+          @Override
+          public Set<String> getLiveNodes() {
+            return new HashSet<>(Arrays.asList("node1", "node2", "node3", "node4"));
+          }
+
+          @Override
+          public DocCollection getCollection(String name) throws IOException {
+            return new DocCollection(name, Collections.emptyMap(), Collections.emptyMap(), DocRouter.DEFAULT) {
+              @Override
+              public Replica getLeader(String sliceName) {
+                if (sliceName.equals("shard1"))
+                  return new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node1"));
+                if (sliceName.equals("shard2"))
+                  return new Replica("r2", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node2"));
+                return null;
+              }
+            };
+          }
+        };
+      }
+    };
+    List<ReplicaPosition> locations = PolicyHelper.getReplicaLocations(
+        "newColl", new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScaleJson)),
+        cloudManager, null, Arrays.asList("shard1", "shard2"), 1, 0, 0, null);
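+    // Expect node3 everywhere: node4 is excluded by the overseer policy rule, node2's 262GB
+    // free disk is below the ~300GB (1.5 x 200) requirement, and node3 beats node1 on the
+    // minimize-cores preference.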
+    assertTrue(locations.stream().allMatch(it -> "node3".equals(it.node)));
+  }
+
 }


[10/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to its own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
new file mode 100644
index 0000000..840f774
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
@@ -0,0 +1,297 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test of the Collections API with the MiniSolrCloudCluster.
+ */
+@LuceneTestCase.Slow
+public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private static final int numShards = 2;
+  private static final int numReplicas = 2;
+  private static final int maxShardsPerNode = 1;
+  private static final int nodeCount = 5;
+  private static final String configName = "solrCloudCollectionConfig";
+  private static final Map<String,String> collectionProperties  // ensure indexes survive core shutdown
+      = Collections.singletonMap("solr.directoryFactory", "solr.StandardDirectoryFactory");
+
+  @Override
+  public void setUp() throws Exception {
+    configureCluster(nodeCount).addConfig(configName, configset("cloud-minimal")).configure();
+    super.setUp();
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    cluster.shutdown();
+    super.tearDown();
+  }
+
+  private void createCollection(String collectionName, String createNodeSet) throws Exception {
+    if (random().nextBoolean()) { // process asynchronously
+      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
+          .setMaxShardsPerNode(maxShardsPerNode)
+          .setCreateNodeSet(createNodeSet)
+          .setProperties(collectionProperties)
+          .processAndWait(cluster.getSolrClient(), 30);
+    }
+    else {
+      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
+          .setMaxShardsPerNode(maxShardsPerNode)
+          .setCreateNodeSet(createNodeSet)
+          .setProperties(collectionProperties)
+          .process(cluster.getSolrClient());
+    }
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish
+        (collectionName, cluster.getSolrClient().getZkStateReader(), true, true, 330);
+  }
+
+  @Test
+  public void testCollectionCreateSearchDelete() throws Exception {
+    final CloudSolrClient client = cluster.getSolrClient();
+    final String collectionName = "testcollection";
+
+    assertNotNull(cluster.getZkServer());
+    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
+    assertEquals(nodeCount, jettys.size());
+    for (JettySolrRunner jetty : jettys) {
+      assertTrue(jetty.isRunning());
+    }
+
+    // shut down a server
+    JettySolrRunner stoppedServer = cluster.stopJettySolrRunner(0);
+    assertTrue(stoppedServer.isStopped());
+    assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
+
+    // create a server
+    JettySolrRunner startedServer = cluster.startJettySolrRunner();
+    assertTrue(startedServer.isRunning());
+    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
+
+    // create collection
+    createCollection(collectionName, null);
+
+    // modify/query collection
+    new UpdateRequest().add("id", "1").commit(client, collectionName);
+    QueryResponse rsp = client.query(collectionName, new SolrQuery("*:*"));
+    assertEquals(1, rsp.getResults().getNumFound());
+
+    // remove a server not hosting any replicas
+    ZkStateReader zkStateReader = client.getZkStateReader();
+    zkStateReader.forceUpdateCollection(collectionName);
+    ClusterState clusterState = zkStateReader.getClusterState();
+    Map<String,JettySolrRunner> jettyMap = new HashMap<>();
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
+      String key = jetty.getBaseUrl().toString().substring((jetty.getBaseUrl().getProtocol() + "://").length());
+      jettyMap.put(key, jetty);
+    }
+    Collection<Slice> slices = clusterState.getCollection(collectionName).getSlices();
+    // track the servers that do not host any replicas
+    for (Slice slice : slices) {
+      jettyMap.remove(slice.getLeader().getNodeName().replace("_solr", "/solr"));
+      for (Replica replica : slice.getReplicas()) {
+        jettyMap.remove(replica.getNodeName().replace("_solr", "/solr"));
+      }
+    }
+    assertTrue("Expected to find a node without a replica", jettyMap.size() > 0);
+    JettySolrRunner jettyToStop = jettyMap.entrySet().iterator().next().getValue();
+    jettys = cluster.getJettySolrRunners();
+    for (int i = 0; i < jettys.size(); ++i) {
+      if (jettys.get(i).equals(jettyToStop)) {
+        cluster.stopJettySolrRunner(i);
+        assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
+      }
+    }
+
+    // re-create a server (to restore the original node count)
+    startedServer = cluster.startJettySolrRunner(jettyToStop);
+    assertTrue(startedServer.isRunning());
+    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
+
+    CollectionAdminRequest.deleteCollection(collectionName).process(client);
+    AbstractDistribZkTestBase.waitForCollectionToDisappear
+        (collectionName, client.getZkStateReader(), true, true, 330);
+
+    // create it again
+    createCollection(collectionName, null);
+
+    // check that there's no left-over state
+    assertEquals(0, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+
+    // modify/query collection
+    new UpdateRequest().add("id", "1").commit(client, collectionName);
+    assertEquals(1, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+  }
+
+  @Test
+  public void testCollectionCreateWithoutCoresThenDelete() throws Exception {
+
+    final String collectionName = "testSolrCloudCollectionWithoutCores";
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    assertNotNull(cluster.getZkServer());
+    assertFalse(cluster.getJettySolrRunners().isEmpty());
+
+    // create collection
+    createCollection(collectionName, OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY);
+
+    // verify that the collection was created without any cores
+    int coreCount = 0;
+    DocCollection docCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
+    for (Map.Entry<String,Slice> entry : docCollection.getSlicesMap().entrySet()) {
+      coreCount += entry.getValue().getReplicasMap().entrySet().size();
+    }
+    assertEquals(0, coreCount);
+
+    // delete the collection
+    CollectionAdminRequest.deleteCollection(collectionName).process(client);
+    AbstractDistribZkTestBase.waitForCollectionToDisappear
+        (collectionName, client.getZkStateReader(), true, true, 330);
+  }
+
+  @Test
+  public void testStopAllStartAll() throws Exception {
+
+    final String collectionName = "testStopAllStartAllCollection";
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    assertNotNull(cluster.getZkServer());
+    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
+    assertEquals(nodeCount, jettys.size());
+    for (JettySolrRunner jetty : jettys) {
+      assertTrue(jetty.isRunning());
+    }
+
+    final SolrQuery query = new SolrQuery("*:*");
+    final SolrInputDocument doc = new SolrInputDocument();
+
+    // create collection
+    createCollection(collectionName, null);
+
+    ZkStateReader zkStateReader = client.getZkStateReader();
+
+    // modify collection
+    final int numDocs = 1 + random().nextInt(10);
+    for (int ii = 1; ii <= numDocs; ++ii) {
+      doc.setField("id", ""+ii);
+      client.add(collectionName, doc);
+      if (ii*2 == numDocs) client.commit(collectionName);
+    }
+    client.commit(collectionName);
+
+    // query collection
+    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
+
+    // the test itself: classify each node as leader, follower, or replica-free
+    zkStateReader.forceUpdateCollection(collectionName);
+    final ClusterState clusterState = zkStateReader.getClusterState();
+
+    final Set<Integer> leaderIndices = new HashSet<>();
+    final Set<Integer> followerIndices = new HashSet<>();
+    {
+      final Map<String,Boolean> shardLeaderMap = new HashMap<>();
+      for (final Slice slice : clusterState.getCollection(collectionName).getSlices()) {
+        for (final Replica replica : slice.getReplicas()) {
+          shardLeaderMap.put(replica.getNodeName().replace("_solr", "/solr"), Boolean.FALSE);
+        }
+        shardLeaderMap.put(slice.getLeader().getNodeName().replace("_solr", "/solr"), Boolean.TRUE);
+      }
+      for (int ii = 0; ii < jettys.size(); ++ii) {
+        final URL jettyBaseUrl = jettys.get(ii).getBaseUrl();
+        final String jettyBaseUrlString = jettyBaseUrl.toString().substring((jettyBaseUrl.getProtocol() + "://").length());
+        final Boolean isLeader = shardLeaderMap.get(jettyBaseUrlString);
+        if (Boolean.TRUE.equals(isLeader)) {
+          leaderIndices.add(ii);
+        } else if (Boolean.FALSE.equals(isLeader)) {
+          followerIndices.add(ii);
+        } // else neither leader nor follower i.e. node without a replica (for our collection)
+      }
+    }
+    final List<Integer> leaderIndicesList = new ArrayList<>(leaderIndices);
+    final List<Integer> followerIndicesList = new ArrayList<>(followerIndices);
+
+    // first stop the followers (in no particular order)
+    Collections.shuffle(followerIndicesList, random());
+    for (Integer ii : followerIndicesList) {
+      if (!leaderIndices.contains(ii)) {
+        cluster.stopJettySolrRunner(jettys.get(ii));
+      }
+    }
+
+    // then stop the leaders (again in no particular order)
+    Collections.shuffle(leaderIndicesList, random());
+    for (Integer ii : leaderIndicesList) {
+      cluster.stopJettySolrRunner(jettys.get(ii));
+    }
+
+    // calculate restart order
+    final List<Integer> restartIndicesList = new ArrayList<>();
+    Collections.shuffle(leaderIndicesList, random());
+    restartIndicesList.addAll(leaderIndicesList);
+    Collections.shuffle(followerIndicesList, random());
+    restartIndicesList.addAll(followerIndicesList);
+    if (random().nextBoolean()) Collections.shuffle(restartIndicesList, random());
+
+    // and then restart jettys in that order
+    for (Integer ii : restartIndicesList) {
+      final JettySolrRunner jetty = jettys.get(ii);
+      if (!jetty.isRunning()) {
+        cluster.startJettySolrRunner(jetty);
+        assertTrue(jetty.isRunning());
+      }
+    }
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
+
+    zkStateReader.forceUpdateCollection(collectionName);
+
+    // re-query collection
+    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
+  }
+}
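
Outside the MiniSolrCloudCluster harness, the create/index/query/delete cycle the test
above exercises looks roughly like the following sketch. The ZooKeeper address and the
collection/config names are placeholders; the SolrJ calls mirror the ones used in the
test, and the zkHost Builder constructor is assumed available on this branch:

  import java.util.Collections;
  import java.util.Optional;

  import org.apache.solr.client.solrj.SolrQuery;
  import org.apache.solr.client.solrj.impl.CloudSolrClient;
  import org.apache.solr.client.solrj.request.CollectionAdminRequest;
  import org.apache.solr.client.solrj.request.UpdateRequest;

  public class CollectionsApiSketch {
    public static void main(String[] args) throws Exception {
      try (CloudSolrClient client = new CloudSolrClient.Builder(
          Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
        // Create a 2-shard, 2-replica collection from an uploaded configset.
        CollectionAdminRequest.createCollection("democoll", "demoConf", 2, 2)
            .setMaxShardsPerNode(1)
            .process(client);

        // Index one document and verify it is searchable.
        new UpdateRequest().add("id", "1").commit(client, "democoll");
        long found = client.query("democoll", new SolrQuery("*:*")).getResults().getNumFound();
        System.out.println("docs found: " + found); // expect 1

        CollectionAdminRequest.deleteCollection("democoll").process(client);
      }
    }
  }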

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
new file mode 100644
index 0000000..58ac17d
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.hdfs.HdfsTestUtil;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.backup.BackupManager;
+import org.apache.solr.core.backup.repository.HdfsBackupRepository;
+import org.apache.solr.util.BadHdfsThreadsFilter;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.core.backup.BackupManager.BACKUP_NAME_PROP;
+import static org.apache.solr.core.backup.BackupManager.BACKUP_PROPS_FILE;
+import static org.apache.solr.core.backup.BackupManager.COLLECTION_NAME_PROP;
+import static org.apache.solr.core.backup.BackupManager.CONFIG_STATE_DIR;
+import static org.apache.solr.core.backup.BackupManager.ZK_STATE_DIR;
+
+/**
+ * This class implements the tests for HDFS integration for Solr backup/restore capability.
+ */
+@ThreadLeakFilters(defaultFilters = true, filters = {
+    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
+})
+public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
+  public static final String SOLR_XML = "<solr>\n" +
+      "\n" +
+      "  <str name=\"shareSchema\">${shareSchema:false}</str>\n" +
+      "  <str name=\"configSetBaseDir\">${configSetBaseDir:configsets}</str>\n" +
+      "  <str name=\"coreRootDirectory\">${coreRootDirectory:.}</str>\n" +
+      "\n" +
+      "  <shardHandlerFactory name=\"shardHandlerFactory\" class=\"HttpShardHandlerFactory\">\n" +
+      "    <str name=\"urlScheme\">${urlScheme:}</str>\n" +
+      "    <int name=\"socketTimeout\">${socketTimeout:90000}</int>\n" +
+      "    <int name=\"connTimeout\">${connTimeout:15000}</int>\n" +
+      "  </shardHandlerFactory>\n" +
+      "\n" +
+      "  <solrcloud>\n" +
+      "    <str name=\"host\">127.0.0.1</str>\n" +
+      "    <int name=\"hostPort\">${hostPort:8983}</int>\n" +
+      "    <str name=\"hostContext\">${hostContext:solr}</str>\n" +
+      "    <int name=\"zkClientTimeout\">${solr.zkclienttimeout:30000}</int>\n" +
+      "    <bool name=\"genericCoreNodeNames\">${genericCoreNodeNames:true}</bool>\n" +
+      "    <int name=\"leaderVoteWait\">10000</int>\n" +
+      "    <int name=\"distribUpdateConnTimeout\">${distribUpdateConnTimeout:45000}</int>\n" +
+      "    <int name=\"distribUpdateSoTimeout\">${distribUpdateSoTimeout:340000}</int>\n" +
+      "  </solrcloud>\n" +
+      "  \n" +
+      "  <backup>\n" +
+      "    <repository  name=\"hdfs\" class=\"org.apache.solr.core.backup.repository.HdfsBackupRepository\"> \n" +
+      "      <str name=\"location\">${solr.hdfs.default.backup.path}</str>\n" +
+      "      <str name=\"solr.hdfs.home\">${solr.hdfs.home:}</str>\n" +
+      "      <str name=\"solr.hdfs.confdir\">${solr.hdfs.confdir:}</str>\n" +
+      "    </repository>\n" +
+      "  </backup>\n" +
+      "  \n" +
+      "</solr>\n";
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static MiniDFSCluster dfsCluster;
+  private static String hdfsUri;
+  private static FileSystem fs;
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
+    hdfsUri = HdfsTestUtil.getURI(dfsCluster);
+    try {
+      URI uri = new URI(hdfsUri);
+      Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
+      conf.setBoolean("fs.hdfs.impl.disable.cache", true);
+      fs = FileSystem.get(uri, conf);
+
+      if (fs instanceof DistributedFileSystem) {
+        // Make sure dfs is not in safe mode
+        while (((DistributedFileSystem) fs).setSafeMode(SafeModeAction.SAFEMODE_GET, true)) {
+          log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
+          try {
+            Thread.sleep(5000);
+          } catch (InterruptedException e) {
+            Thread.interrupted();
+            // continue
+          }
+        }
+      }
+
+      fs.mkdirs(new org.apache.hadoop.fs.Path("/backup"));
+    } catch (IOException | URISyntaxException e) {
+      throw new RuntimeException(e);
+    }
+
+    System.setProperty("solr.hdfs.default.backup.path", "/backup");
+    System.setProperty("solr.hdfs.home", hdfsUri + "/solr");
+    useFactory("solr.StandardDirectoryFactory");
+
+    configureCluster(NUM_SHARDS)// nodes
+    .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
+    .withSolrXml(SOLR_XML)
+    .configure();
+  }
+
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    System.clearProperty("solr.hdfs.home");
+    System.clearProperty("solr.hdfs.default.backup.path");
+    System.clearProperty("test.build.data");
+    System.clearProperty("test.cache.data");
+    IOUtils.closeQuietly(fs);
+    fs = null;
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+  }
+
+  @Override
+  public String getCollectionName() {
+    return "hdfsbackuprestore";
+  }
+
+  @Override
+  public String getBackupRepoName() {
+    return "hdfs";
+  }
+
+  @Override
+  public String getBackupLocation() {
+    return null;
+  }
+
+  protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
+    String backupName = "configonlybackup";
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
+        .setRepositoryName(getBackupRepoName())
+        .setIndexBackupStrategy(CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY);
+    backup.process(solrClient);
+
+    Map<String,String> params = new HashMap<>();
+    params.put("location", "/backup");
+    params.put("solr.hdfs.home", hdfsUri + "/solr");
+
+    HdfsBackupRepository repo = new HdfsBackupRepository();
+    repo.init(new NamedList<>(params));
+    BackupManager mgr = new BackupManager(repo, solrClient.getZkStateReader());
+
+    URI baseLoc = repo.createURI("/backup");
+
+    Properties props = mgr.readBackupProperties(baseLoc, backupName);
+    assertNotNull(props);
+    assertEquals(collectionName, props.getProperty(COLLECTION_NAME_PROP));
+    assertEquals(backupName, props.getProperty(BACKUP_NAME_PROP));
+    assertEquals(configName, props.getProperty(COLL_CONF));
+
+    DocCollection collectionState = mgr.readCollectionState(baseLoc, backupName, collectionName);
+    assertNotNull(collectionState);
+    assertEquals(collectionName, collectionState.getName());
+
+    URI configDirLoc = repo.resolve(baseLoc, backupName, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
+    assertTrue(repo.exists(configDirLoc));
+
+    Collection<String> expected = Arrays.asList(BACKUP_PROPS_FILE, ZK_STATE_DIR);
+    URI backupLoc = repo.resolve(baseLoc, backupName);
+    String[] dirs = repo.listAll(backupLoc);
+    for (String d : dirs) {
+      assertTrue(expected.contains(d));
+    }
+  }
+
+}
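
For reference, the backup request the test issues (and its restore counterpart) can be
driven from any SolrJ client. A hedged sketch, assuming a connected CloudSolrClient
named client; the repository, location, collection, and backup names are placeholders:

  // Back up a collection to the repository configured in solr.xml above.
  CollectionAdminRequest.Backup backup =
      CollectionAdminRequest.backupCollection("mycoll", "nightly")
          .setRepositoryName("hdfs")
          .setLocation("/backup");
  backup.process(client);

  // Restoring into a (new) collection is the symmetric call.
  CollectionAdminRequest.Restore restore =
      CollectionAdminRequest.restoreCollection("mycoll_restored", "nightly")
          .setRepositoryName("hdfs")
          .setLocation("/backup");
  restore.process(client);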

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
new file mode 100644
index 0000000..587b9b1
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import org.junit.BeforeClass;
+
+/**
+ * This class implements the tests for local file-system integration for Solr backup/restore capability.
+ * Note that the Solr backup/restore still requires a "shared" file-system. It's just that in this case
+ * such a file-system would be exposed via the local file-system API.
+ */
+public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
+  private static String backupLocation;
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    configureCluster(NUM_SHARDS)// nodes
+        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
+        .configure();
+
+    boolean whitespacesInPath = random().nextBoolean();
+    if (whitespacesInPath) {
+      backupLocation = createTempDir("my backup").toAbsolutePath().toString();
+    } else {
+      backupLocation = createTempDir("mybackup").toAbsolutePath().toString();
+    }
+  }
+
+  @Override
+  public String getCollectionName() {
+    return "backuprestore";
+  }
+
+  @Override
+  public String getBackupRepoName() {
+    return null;
+  }
+
+  @Override
+  public String getBackupLocation() {
+    return backupLocation;
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java
new file mode 100644
index 0000000..d327aec
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Test;
+
+@Slow
+public class TestReplicaProperties extends ReplicaPropertiesBase {
+
+  public static final String COLLECTION_NAME = "testcollection";
+
+  public TestReplicaProperties() {
+    schemaString = "schema15.xml";      // we need a string id
+    sliceCount = 2;
+  }
+
+  @Test
+  @ShardsFixed(num = 4)
+  public void test() throws Exception {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      // Mix up a bunch of different combinations of shards and replicas in order to exercise boundary cases.
+      // shards, replicationfactor, maxreplicaspernode
+      int shards = random().nextInt(7);
+      if (shards < 2) shards = 2;
+      int rFactor = random().nextInt(4);
+      if (rFactor < 2) rFactor = 2;
+      createCollection(null, COLLECTION_NAME, shards, rFactor, shards * rFactor + 1, client, null, "conf1");
+    }
+
+    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
+    waitForRecoveriesToFinish(COLLECTION_NAME, false);
+
+    listCollection();
+
+    clusterAssignPropertyTest();
+  }
+
+  private void listCollection() throws IOException, SolrServerException {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.LIST.toString());
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      List<String> collections = (List<String>) rsp.get("collections");
+      assertTrue("control_collection was not found in list", collections.contains("control_collection"));
+      assertTrue(DEFAULT_COLLECTION + " was not found in list", collections.contains(DEFAULT_COLLECTION));
+      assertTrue(COLLECTION_NAME + " was not found in list", collections.contains(COLLECTION_NAME));
+    }
+  }
+
+
+  private void clusterAssignPropertyTest() throws Exception {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      client.connect();
+      try {
+        doPropertyAction(client,
+            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+            "property", "preferredLeader");
+      } catch (SolrException se) {
+        assertTrue("Should have seen missing required parameter 'collection' error",
+            se.getMessage().contains("Missing required parameter: collection"));
+      }
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+          "collection", COLLECTION_NAME,
+          "property", "preferredLeader");
+
+      verifyUniqueAcrossCollection(client, COLLECTION_NAME, "preferredleader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+          "collection", COLLECTION_NAME,
+          "property", "property.newunique",
+          "shardUnique", "true");
+      verifyUniqueAcrossCollection(client, COLLECTION_NAME, "property.newunique");
+
+      try {
+        doPropertyAction(client,
+            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+            "collection", COLLECTION_NAME,
+            "property", "whatever",
+            "shardUnique", "false");
+        fail("Should have thrown an exception here.");
+      } catch (SolrException se) {
+        assertTrue("Should have gotten a specific error message here",
+            se.getMessage().contains("Balancing properties amongst replicas in a slice requires that the " +
+                "property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'"));
+      }
+      // Should be able to set non-unique-per-slice values in several places.
+      Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
+      List<String> sliceList = new ArrayList<>(slices.keySet());
+      String c1_s1 = sliceList.get(0);
+      List<String> replicasList = new ArrayList<>(slices.get(c1_s1).getReplicasMap().keySet());
+      String c1_s1_r1 = replicasList.get(0);
+      String c1_s1_r2 = replicasList.get(1);
+
+      addProperty(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "bogus1",
+          "property.value", "true");
+
+      addProperty(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r2,
+          "property", "property.bogus1",
+          "property.value", "whatever");
+
+      try {
+        doPropertyAction(client,
+            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+            "collection", COLLECTION_NAME,
+            "property", "bogus1",
+            "shardUnique", "false");
+        fail("Should have thrown parameter error here");
+      } catch (SolrException se) {
+        assertTrue("Should have caught specific exception ",
+            se.getMessage().contains("Balancing properties amongst replicas in a slice requires that the property be " +
+                "pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'"));
+      }
+
+      // Should have no effect despite the "shardUnique" param being set.
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+          "collection", COLLECTION_NAME,
+          "property", "property.bogus1",
+          "shardUnique", "true");
+
+      verifyPropertyVal(client, COLLECTION_NAME,
+          c1_s1_r1, "bogus1", "true");
+      verifyPropertyVal(client, COLLECTION_NAME,
+          c1_s1_r2, "property.bogus1", "whatever");
+
+      // At this point we've assigned a preferred leader. Make it happen and check that all the nodes that are
+      // leaders _also_ have the preferredLeader property set.
+
+
+      NamedList<Object> res = doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.REBALANCELEADERS.toString(),
+          "collection", COLLECTION_NAME);
+
+      verifyLeaderAssignment(client, COLLECTION_NAME);
+
+    }
+  }
+
+  private void verifyLeaderAssignment(CloudSolrClient client, String collectionName)
+      throws InterruptedException, KeeperException {
+    String lastFailMsg = "";
+    for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
+      lastFailMsg = "";
+      ClusterState clusterState = client.getZkStateReader().getClusterState();
+      for (Slice slice : clusterState.getCollection(collectionName).getSlices()) {
+        Boolean foundLeader = false;
+        Boolean foundPreferred = false;
+        for (Replica replica : slice.getReplicas()) {
+          Boolean isLeader = replica.getBool("leader", false);
+          Boolean isPreferred = replica.getBool("property.preferredleader", false);
+          if (isLeader != isPreferred) {
+            lastFailMsg = "Replica should NOT have preferredLeader != leader. Preferred: " + isPreferred.toString() +
+                " leader is " + isLeader.toString();
+          }
+          if (foundLeader && isLeader) {
+            lastFailMsg = "There should only be a single leader in _any_ shard! Replica " + replica.getName() +
+                " is the second leader in slice " + slice.getName();
+          }
+          if (foundPreferred && isPreferred) {
+            lastFailMsg = "There should only be a single preferredLeader in _any_ shard! Replica " + replica.getName() +
+                " is the second preferredLeader in slice " + slice.getName();
+          }
+          foundLeader = foundLeader ? foundLeader : isLeader;
+          foundPreferred = foundPreferred ? foundPreferred : isPreferred;
+        }
+      }
+      if (lastFailMsg.length() == 0) return;
+      Thread.sleep(100);
+    }
+    fail(lastFailMsg);
+  }
+
+  private void addProperty(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
+    assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    for (int idx = 0; idx < paramsIn.length; idx += 2) {
+      params.set(paramsIn[idx], paramsIn[idx + 1]);
+    }
+    QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+    client.request(request);
+
+  }
+}
+
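
The test's addProperty/doPropertyAction helpers ultimately issue plain Collections API
requests. A minimal standalone version of one such call, assuming a connected
CloudSolrClient named client (the replica name is a placeholder):

  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString());
  params.set("collection", "testcollection");
  params.set("shard", "shard1");
  params.set("replica", "core_node1");        // placeholder replica name
  params.set("property", "preferredLeader");
  params.set("property.value", "true");

  QueryRequest request = new QueryRequest(params);
  request.setPath("/admin/collections");      // Collections API endpoint
  client.request(request);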

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestRequestStatusCollectionAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestRequestStatusCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestRequestStatusCollectionAPI.java
new file mode 100644
index 0000000..3d32d6c
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestRequestStatusCollectionAPI.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.BasicDistributedZkTest;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.junit.Test;
+
+public class TestRequestStatusCollectionAPI extends BasicDistributedZkTest {
+
+  public static final int MAX_WAIT_TIMEOUT_SECONDS = 90;
+
+  public TestRequestStatusCollectionAPI() {
+    schemaString = "schema15.xml";      // we need a string id
+  }
+
+  @Test
+  public void test() throws Exception {
+    ModifiableSolrParams params = new ModifiableSolrParams();
+
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
+    params.set("name", "collection2");
+    params.set("numShards", 2);
+    params.set("replicationFactor", 1);
+    params.set("maxShardsPerNode", 100);
+    params.set("collection.configName", "conf1");
+    params.set(CommonAdminParams.ASYNC, "1000");
+    try {
+      sendRequest(params);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    // Check for the request to be completed.
+
+    NamedList r = null;
+    NamedList status = null;
+    String message = null;
+
+    params = new ModifiableSolrParams();
+
+    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
+    params.set(OverseerCollectionMessageHandler.REQUESTID, "1000");
+
+    try {
+      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    assertEquals("found [1000] in completed tasks", message);
+
+    // Check for a random (hopefully non-existent) request id
+    params = new ModifiableSolrParams();
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.REQUESTSTATUS.toString());
+    params.set(OverseerCollectionMessageHandler.REQUESTID, "9999999");
+    try {
+      r = sendRequest(params);
+      status = (NamedList) r.get("status");
+      message = (String) status.get("msg");
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    assertEquals("Did not find [9999999] in any tasks queue", message);
+
+    params = new ModifiableSolrParams();
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.SPLITSHARD.toString());
+    params.set("collection", "collection2");
+    params.set("shard", "shard1");
+    params.set(CommonAdminParams.ASYNC, "1001");
+    try {
+      sendRequest(params);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    // Check for the request to be completed.
+    params = new ModifiableSolrParams();
+    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
+    params.set(OverseerCollectionMessageHandler.REQUESTID, "1001");
+    try {
+      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    assertEquals("found [1001] in completed tasks", message);
+
+    params = new ModifiableSolrParams();
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
+    params.set("name", "collection2");
+    params.set("numShards", 2);
+    params.set("replicationFactor", 1);
+    params.set("maxShardsPerNode", 100);
+    params.set("collection.configName", "conf1");
+    params.set(CommonAdminParams.ASYNC, "1002");
+    try {
+      sendRequest(params);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    params = new ModifiableSolrParams();
+
+    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
+    params.set(OverseerCollectionMessageHandler.REQUESTID, "1002");
+
+    try {
+      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+
+    assertEquals("found [1002] in failed tasks", message);
+
+    params = new ModifiableSolrParams();
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
+    params.set("name", "collection3");
+    params.set("numShards", 1);
+    params.set("replicationFactor", 1);
+    params.set("maxShardsPerNode", 100);
+    params.set("collection.configName", "conf1");
+    params.set(CommonAdminParams.ASYNC, "1002");
+    try {
+      r = sendRequest(params);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    assertEquals("Task with the same requestid already exists.", r.get("error"));
+  }
+
+  /**
+   * Helper method to send a status request with a specific retry limit and return
+   * the message (or null) from the final status response.
+   */
+  private String sendStatusRequestWithRetry(ModifiableSolrParams params, int maxCounter)
+      throws SolrServerException, IOException{
+    String message = null;
+    while (maxCounter-- > 0) {
+      final NamedList r = sendRequest(params);
+      final NamedList status = (NamedList) r.get("status");
+      final RequestStatusState state = RequestStatusState.fromKey((String) status.get("state"));
+      message = (String) status.get("msg");
+
+      if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
+        return message;
+      }
+
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
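+        // Interrupt ignored; the loop simply re-checks the status until retries run out.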
+      }
+
+    }
+    // Retries exhausted; return the message from the last observed state.
+    return message;
+  }
+
+  protected NamedList sendRequest(ModifiableSolrParams params) throws SolrServerException, IOException {
+    SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.getSolrClient()).getBaseURL();
+    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
+
+    try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 15000)) {
+      return baseServer.request(request);
+    }
+
+  }
+}
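
Outside the test framework the same async handshake can be driven from SolrJ
against a live cluster. A minimal sketch, assuming a reachable SolrCloud node at
baseUrl and SolrJ 7.x on the classpath; the URL, collection name and request id
are illustrative, not taken from the patch:

import java.io.IOException;

import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

public class AsyncStatusExample {
  public static void main(String[] args) throws SolrServerException, IOException {
    String baseUrl = "http://localhost:8983/solr"; // assumption: a local SolrCloud node

    try (HttpSolrClient client = new HttpSolrClient.Builder(baseUrl).build()) {
      // Fire an async CREATE; the call returns as soon as the task is queued.
      ModifiableSolrParams create = new ModifiableSolrParams();
      create.set("action", "CREATE");
      create.set("name", "example_coll");
      create.set("numShards", 1);
      create.set("replicationFactor", 1);
      create.set("async", "example-req-1");
      QueryRequest createReq = new QueryRequest(create);
      createReq.setPath("/admin/collections");
      client.request(createReq);

      // Poll REQUESTSTATUS until the task reaches a terminal state.
      ModifiableSolrParams status = new ModifiableSolrParams();
      status.set("action", "REQUESTSTATUS");
      status.set("requestid", "example-req-1");
      QueryRequest statusReq = new QueryRequest(status);
      statusReq.setPath("/admin/collections");
      for (int i = 0; i < 90; i++) {
        NamedList<Object> rsp = client.request(statusReq);
        NamedList<?> state = (NamedList<?>) rsp.get("status");
        String key = (String) state.get("state");
        if ("completed".equals(key) || "failed".equals(key)) {
          System.out.println(state.get("msg")); // e.g. "found [example-req-1] in completed tasks"
          break;
        }
        try { Thread.sleep(1000); } catch (InterruptedException ignored) { }
      }
    }
  }
}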

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
index a842a87..2310a14 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
@@ -80,7 +80,7 @@ import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.REQUESTID;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.REQUESTID;
 
 /**
  * Simulated {@link SolrCloudManager}.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index 22f9fb9..86de8ff 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -47,11 +47,11 @@ import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
 import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.cloud.ActionThrottle;
-import org.apache.solr.cloud.AddReplicaCmd;
-import org.apache.solr.cloud.Assign;
-import org.apache.solr.cloud.CreateCollectionCmd;
-import org.apache.solr.cloud.CreateShardCmd;
-import org.apache.solr.cloud.SplitShardCmd;
+import org.apache.solr.cloud.api.collections.AddReplicaCmd;
+import org.apache.solr.cloud.api.collections.Assign;
+import org.apache.solr.cloud.api.collections.CreateCollectionCmd;
+import org.apache.solr.cloud.api.collections.CreateShardCmd;
+import org.apache.solr.cloud.api.collections.SplitShardCmd;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cloud.overseer.CollectionMutator;
 import org.apache.solr.cloud.overseer.ZkWriteCommand;
@@ -730,7 +730,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
   }
 
   /**
-   * Move replica. This uses a similar algorithm as {@link org.apache.solr.cloud.MoveReplicaCmd#moveNormalReplica(ClusterState, NamedList, String, String, DocCollection, Replica, Slice, int, boolean)}.
+   * Move replica. This uses a similar algorithm as {@link org.apache.solr.cloud.api.collections.MoveReplicaCmd#moveNormalReplica(ClusterState, NamedList, String, String, DocCollection, Replica, Slice, int, boolean)}.
    * @param message operation details
    * @param results operation results.
    */
@@ -909,7 +909,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
   }
 
   /**
-   * Delete a shard. This uses a similar algorithm as {@link org.apache.solr.cloud.DeleteShardCmd}
+   * Delete a shard. This uses a similar algorithm as {@link org.apache.solr.cloud.api.collections.DeleteShardCmd}
    * @param message operation details
    * @param results operation results
    */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java
index c242809..cd45c15 100644
--- a/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java
@@ -43,7 +43,7 @@ import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.cloud.AbstractDistribZkTestBase;
 import org.apache.solr.cloud.AbstractZkTestCase;
 import org.apache.solr.cloud.ChaosMonkey;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
@@ -74,9 +74,8 @@ import org.junit.BeforeClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.NUM_SLICES;
 import static org.apache.solr.common.cloud.ZkStateReader.CLUSTER_PROPS;
 import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
@@ -448,9 +447,9 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
     for (Map.Entry<String, Object> entry : collectionProps.entrySet()) {
       if (entry.getValue() != null) params.set(entry.getKey(), String.valueOf(entry.getValue()));
     }
-    Integer numShards = (Integer) collectionProps.get(NUM_SLICES);
+    Integer numShards = (Integer) collectionProps.get(OverseerCollectionMessageHandler.NUM_SLICES);
     if (numShards == null) {
-      String shardNames = (String) collectionProps.get(SHARDS_PROP);
+      String shardNames = (String) collectionProps.get(OverseerCollectionMessageHandler.SHARDS_PROP);
       numShards = StrUtils.splitSmart(shardNames, ',').size();
     }
     Integer replicationFactor = (Integer) collectionProps.get(REPLICATION_FACTOR);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
deleted file mode 100644
index 58d499b..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.hdfs;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import com.carrotsearch.randomizedtesting.annotations.Nightly;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-import com.codahale.metrics.Counter;
-import com.codahale.metrics.Metric;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.request.CoreStatus;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.cloud.CollectionsAPIDistributedZkTest;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.util.BadHdfsThreadsFilter;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-@Slow
-@Nightly
-@ThreadLeakFilters(defaultFilters = true, filters = {
-    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
-})
-public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistributedZkTest {
-
-  private static MiniDFSCluster dfsCluster;
-
-  @BeforeClass
-  public static void setupClass() throws Exception {
-    System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
-    System.setProperty("tests.hdfs.numdatanodes", "1");
-   
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
-
-    ZkConfigManager configManager = new ZkConfigManager(zkClient());
-    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf");
-    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf2");
-
-    System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
-  }
-
-  @AfterClass
-  public static void teardownClass() throws Exception {
-    cluster.shutdown(); // need to close before the MiniDFSCluster
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
-    System.clearProperty("solr.hdfs.blockcache.blocksperbank");
-    System.clearProperty("tests.hdfs.numdatanodes");
-    System.clearProperty("solr.hdfs.home");
-  }
-
-  @Test
-  public void moveReplicaTest() throws Exception {
-    cluster.waitForAllNodes(5000);
-    String coll = "movereplicatest_coll";
-
-    CloudSolrClient cloudClient = cluster.getSolrClient();
-
-    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
-    create.setMaxShardsPerNode(2);
-    cloudClient.request(create);
-
-    for (int i = 0; i < 10; i++) {
-      cloudClient.add(coll, sdoc("id",String.valueOf(i)));
-      cloudClient.commit(coll);
-    }
-
-    List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
-    Collections.shuffle(slices, random());
-    Slice slice = null;
-    Replica replica = null;
-    for (Slice s : slices) {
-      slice = s;
-      for (Replica r : s.getReplicas()) {
-        if (s.getLeader() != r) {
-          replica = r;
-        }
-      }
-    }
-    String dataDir = getDataDir(replica);
-
-    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
-    ArrayList<String> l = new ArrayList<>(liveNodes);
-    Collections.shuffle(l, random());
-    String targetNode = null;
-    for (String node : liveNodes) {
-      if (!replica.getNodeName().equals(node)) {
-        targetNode = node;
-        break;
-      }
-    }
-    assertNotNull(targetNode);
-
-    CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
-    moveReplica.process(cloudClient);
-
-    checkNumOfCores(cloudClient, replica.getNodeName(), 0);
-    checkNumOfCores(cloudClient, targetNode, 2);
-
-    waitForState("Wait for recovery finish failed",coll, clusterShape(2,2));
-    slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
-    boolean found = false;
-    for (Replica newReplica : slice.getReplicas()) {
-      if (getDataDir(newReplica).equals(dataDir)) {
-        found = true;
-      }
-    }
-    assertTrue(found);
-
-
-    // data dir is reused so replication will be skipped
-    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
-      SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
-      List<String> registryNames = manager.registryNames().stream()
-          .filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
-      for (String registry : registryNames) {
-        Map<String, Metric> metrics = manager.registry(registry).getMetrics();
-        Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
-        if (counter != null) {
-          assertEquals(0, counter.getCount());
-        }
-      }
-    }
-  }
-
-
-  private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, int expectedCores) throws IOException, SolrServerException {
-    assertEquals(nodeName + " does not have expected number of cores",expectedCores, getNumOfCores(cloudClient, nodeName));
-  }
-
-  private int getNumOfCores(CloudSolrClient cloudClient, String nodeName) throws IOException, SolrServerException {
-    try (HttpSolrClient coreclient = getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName))) {
-      CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
-      return status.getCoreStatus().size();
-    }
-  }
-
-  private String getDataDir(Replica replica) throws IOException, SolrServerException {
-    try (HttpSolrClient coreclient = getHttpSolrClient(replica.getBaseUrl())) {
-      CoreStatus status = CoreAdminRequest.getCoreStatus(replica.getCoreName(), coreclient);
-      return status.getDataDirectory();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index 9f0ff20..b031393 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -56,6 +56,7 @@ import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.CoreAdminResponse;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrException;
@@ -96,9 +97,6 @@ import org.noggit.JSONWriter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
 import static org.apache.solr.common.util.Utils.makeMap;
 
 /**
@@ -174,7 +172,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     }
   }
 
-  static class CloudSolrServerClient {
+  public static class CloudSolrServerClient {
     SolrClient solrClient;
     String shardName;
     int port;
@@ -186,6 +184,10 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
       this.solrClient = client;
     }
 
+    public SolrClient getSolrClient() {
+      return solrClient;
+    }
+
     @Override
     public int hashCode() {
       final int prime = 31;
@@ -1621,9 +1623,9 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     for (Map.Entry<String, Object> entry : collectionProps.entrySet()) {
       if(entry.getValue() !=null) params.set(entry.getKey(), String.valueOf(entry.getValue()));
     }
-    Integer numShards = (Integer) collectionProps.get(NUM_SLICES);
+    Integer numShards = (Integer) collectionProps.get(OverseerCollectionMessageHandler.NUM_SLICES);
     if(numShards==null){
-      String shardNames = (String) collectionProps.get(SHARDS_PROP);
+      String shardNames = (String) collectionProps.get(OverseerCollectionMessageHandler.SHARDS_PROP);
       numShards = StrUtils.splitSmart(shardNames,',').size();
     }
     Integer numNrtReplicas = (Integer) collectionProps.get(ZkStateReader.NRT_REPLICAS);
@@ -1685,12 +1687,12 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     int numTlogReplicas = useTlogReplicas()?replicationFactor:0;
     return createCollection(collectionInfos, collectionName,
         Utils.makeMap(
-        NUM_SLICES, numShards,
-        ZkStateReader.NRT_REPLICAS, numNrtReplicas,
-        ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
-        ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
-        CREATE_NODE_SET, createNodeSetStr,
-        ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
+            OverseerCollectionMessageHandler.NUM_SLICES, numShards,
+            ZkStateReader.NRT_REPLICAS, numNrtReplicas,
+            ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
+            ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
+            OverseerCollectionMessageHandler.CREATE_NODE_SET, createNodeSetStr,
+            ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
         client, configSetName);
   }
 
@@ -1701,12 +1703,12 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     int numTlogReplicas = useTlogReplicas()?replicationFactor:0;
     return createCollection(collectionInfos, collectionName,
         Utils.makeMap(
-        NUM_SLICES, numShards,
-        ZkStateReader.NRT_REPLICAS, numNrtReplicas,
-        ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
-        ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
-        CREATE_NODE_SET, createNodeSetStr,
-        ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
+            OverseerCollectionMessageHandler.NUM_SLICES, numShards,
+            ZkStateReader.NRT_REPLICAS, numNrtReplicas,
+            ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
+            ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
+            OverseerCollectionMessageHandler.CREATE_NODE_SET, createNodeSetStr,
+            ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
         client, configName);
   }
 
@@ -1905,7 +1907,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
         ZkStateReader.NRT_REPLICAS, numNrtReplicas,
         ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
         ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
-        NUM_SLICES, numShards);
+        OverseerCollectionMessageHandler.NUM_SLICES, numShards);
     Map<String,List<Integer>> collectionInfos = new HashMap<>();
     createCollection(collectionInfos, collName, props, client);
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
index 7f4f0cb..360632c 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
@@ -419,7 +419,7 @@ public class MiniSolrCloudCluster {
     return jetty;
   }
 
-  protected JettySolrRunner stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
+  public JettySolrRunner stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
     jetty.stop();
     return jetty;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java b/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
index 90eea94..ac1c5e1 100644
--- a/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
+++ b/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
@@ -73,7 +73,7 @@ public class MiniSolrCloudClusterTest extends LuceneTestCase {
 
     MiniSolrCloudCluster cluster = new MiniSolrCloudCluster(3, createTempDir(), JettyConfig.builder().build()) {
       @Override
-      protected JettySolrRunner stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
+      public JettySolrRunner stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
         JettySolrRunner j = super.stopJettySolrRunner(jetty);
         if (jettyIndex.incrementAndGet() == 2)
           throw new IOException("Fake IOException on shutdown!");


[07/41] lucene-solr:jira/solr-11702: SOLR-11794: Restart replication from leader on core reload on PULL replicas

Posted by da...@apache.org.
SOLR-11794: Restart replication from leader on core reload on PULL replicas


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1c4b417c
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1c4b417c
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1c4b417c

Branch: refs/heads/jira/solr-11702
Commit: 1c4b417c50cc167818a9baf656c8f1f21614262f
Parents: a08f712
Author: Tomas Fernandez Lobbe <tf...@apache.org>
Authored: Mon Jan 15 16:15:14 2018 -0800
Committer: Tomas Fernandez Lobbe <tf...@apache.org>
Committed: Mon Jan 15 16:15:14 2018 -0800

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 +
 .../org/apache/solr/core/CoreContainer.java     |  3 +
 .../org/apache/solr/cloud/TestPullReplica.java  | 78 ++++++++++++--------
 3 files changed, 51 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1c4b417c/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 8cac79a..ecdcca1 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -104,6 +104,8 @@ Bug Fixes
 
 * SOLR-11839: Fix test failures resulting from SOLR-11218 (Erick Erickson)
 
+* SOLR-11794: PULL replicas stop replicating after collection RELOAD (Samuel Tatipamula, Tomás Fernández Löbbe)
+
 Optimizations
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1c4b417c/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index eb13775..4e795b6 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -1300,6 +1300,9 @@ public class CoreContainer {
               getZkController().startReplicationFromLeader(newCore.getName(), true);
             }
 
+          } else if(replica.getType() == Replica.Type.PULL) {
+            getZkController().stopReplicationFromLeader(core.getName());
+            getZkController().startReplicationFromLeader(newCore.getName(), false);
           }
         }
       } catch (SolrCoreState.CoreIsClosedException e) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1c4b417c/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
index 7d652b8..e153998 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
@@ -214,45 +214,59 @@ public class TestPullReplica extends SolrCloudTestCase {
   
   @SuppressWarnings("unchecked")
   public void testAddDocs() throws Exception {
-    int numReadOnlyReplicas = 1 + random().nextInt(3);
-    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1, 0, numReadOnlyReplicas)
+    int numPullReplicas = 1 + random().nextInt(3);
+    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1, 0, numPullReplicas)
     .setMaxShardsPerNode(100)
     .process(cluster.getSolrClient());
-    waitForState("Expected collection to be created with 1 shard and " + (numReadOnlyReplicas + 1) + " replicas", collectionName, clusterShape(1, numReadOnlyReplicas + 1));
-    DocCollection docCollection = assertNumberOfReplicas(1, 0, numReadOnlyReplicas, false, true);
+    waitForState("Expected collection to be created with 1 shard and " + (numPullReplicas + 1) + " replicas", collectionName, clusterShape(1, numPullReplicas + 1));
+    DocCollection docCollection = assertNumberOfReplicas(1, 0, numPullReplicas, false, true);
     assertEquals(1, docCollection.getSlices().size());
     
-    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "1", "foo", "bar"));
-    cluster.getSolrClient().commit(collectionName);
-    
-    Slice s = docCollection.getSlices().iterator().next();
-    try (HttpSolrClient leaderClient = getHttpSolrClient(s.getLeader().getCoreUrl())) {
-      assertEquals(1, leaderClient.query(new SolrQuery("*:*")).getResults().getNumFound());
-    }
-    
-    TimeOut t = new TimeOut(REPLICATION_TIMEOUT_SECS, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-    for (Replica r:s.getReplicas(EnumSet.of(Replica.Type.PULL))) {
-      //TODO: assert replication < REPLICATION_TIMEOUT_SECS
-      try (HttpSolrClient readOnlyReplicaClient = getHttpSolrClient(r.getCoreUrl())) {
-        while (true) {
-          try {
-            assertEquals("Replica " + r.getName() + " not up to date after 10 seconds",
-                1, readOnlyReplicaClient.query(new SolrQuery("*:*")).getResults().getNumFound());
-            break;
-          } catch (AssertionError e) {
-            if (t.hasTimedOut()) {
-              throw e;
-            } else {
-              Thread.sleep(100);
+    boolean reloaded = false;
+    int numDocs = 0;
+    while (true) {
+      numDocs++;
+      cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", String.valueOf(numDocs), "foo", "bar"));
+      cluster.getSolrClient().commit(collectionName);
+      
+      Slice s = docCollection.getSlices().iterator().next();
+      try (HttpSolrClient leaderClient = getHttpSolrClient(s.getLeader().getCoreUrl())) {
+        assertEquals(numDocs, leaderClient.query(new SolrQuery("*:*")).getResults().getNumFound());
+      }
+      
+      TimeOut t = new TimeOut(REPLICATION_TIMEOUT_SECS, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+      for (Replica r:s.getReplicas(EnumSet.of(Replica.Type.PULL))) {
+        //TODO: assert replication < REPLICATION_TIMEOUT_SECS
+        try (HttpSolrClient pullReplicaClient = getHttpSolrClient(r.getCoreUrl())) {
+          while (true) {
+            try {
+              assertEquals("Replica " + r.getName() + " not up to date after 10 seconds",
+                  numDocs, pullReplicaClient.query(new SolrQuery("*:*")).getResults().getNumFound());
+              break;
+            } catch (AssertionError e) {
+              if (t.hasTimedOut()) {
+                throw e;
+              } else {
+                Thread.sleep(100);
+              }
             }
           }
+          SolrQuery req = new SolrQuery(
+              "qt", "/admin/plugins",
+              "stats", "true");
+          QueryResponse statsResponse = pullReplicaClient.query(req);
+          assertEquals("Replicas shouldn't process the add document request: " + statsResponse, 
+              0L, ((Map<String, Object>)((NamedList<Object>)statsResponse.getResponse()).findRecursive("plugins", "UPDATE", "updateHandler", "stats")).get("UPDATE.updateHandler.adds"));
         }
-        SolrQuery req = new SolrQuery(
-            "qt", "/admin/plugins",
-            "stats", "true");
-        QueryResponse statsResponse = readOnlyReplicaClient.query(req);
-        assertEquals("Replicas shouldn't process the add document request: " + statsResponse, 
-            0L, ((Map<String, Object>)((NamedList<Object>)statsResponse.getResponse()).findRecursive("plugins", "UPDATE", "updateHandler", "stats")).get("UPDATE.updateHandler.adds"));
+      }
+      if (reloaded) {
+        break;
+      } else {
+        // reload
+        CollectionAdminResponse response = CollectionAdminRequest.reloadCollection(collectionName)
+        .process(cluster.getSolrClient());
+        assertEquals(0, response.getStatus());
+        reloaded = true;
       }
     }
     assertUlogPresence(docCollection);
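
As a usage note, the reload that previously left PULL replicas stale is the plain
collection RELOAD. A minimal SolrJ sketch, assuming an existing CloudSolrClient
(cloudClient) and a collection that carries PULL replicas; the collection name is
illustrative:

import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;

CollectionAdminResponse rsp = CollectionAdminRequest.reloadCollection("example_coll")
    .process(cloudClient);
// With this fix the reloaded PULL cores stop and restart replication from the
// leader, so commits made on the leader after the reload keep becoming visible.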


[23/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to its own package

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
deleted file mode 100644
index 2171c60..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.cloud.overseer.ClusterStateMutator;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.cloud.ZooKeeperException;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.admin.ConfigSetsHandlerApi;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardRequest;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.NoNodeException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.RANDOM;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.util.StrUtils.formatString;
-
-public class CreateCollectionCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-  private final DistribStateManager stateManager;
-
-  public CreateCollectionCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.stateManager = ocmh.cloudManager.getDistribStateManager();
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    final String collectionName = message.getStr(NAME);
-    final boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
-    log.info("Create collection {}", collectionName);
-    if (clusterState.hasCollection(collectionName)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "collection already exists: " + collectionName);
-    }
-
-    String configName = getConfigName(collectionName, message);
-    if (configName == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No config set found to associate with the collection.");
-    }
-
-    ocmh.validateConfigOrThrowSolrException(configName);
-    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-
-    try {
-
-      final String async = message.getStr(ASYNC);
-
-      List<String> nodeList = new ArrayList<>();
-      List<String> shardNames = new ArrayList<>();
-      List<ReplicaPosition> replicaPositions = buildReplicaPositions(ocmh.cloudManager, clusterState, message,
-          nodeList, shardNames, sessionWrapper);
-      ZkStateReader zkStateReader = ocmh.zkStateReader;
-      boolean isLegacyCloud = Overseer.isLegacy(zkStateReader);
-
-      ocmh.createConfNode(stateManager, configName, collectionName, isLegacyCloud);
-
-      Map<String,String> collectionParams = new HashMap<>();
-      Map<String,Object> collectionProps = message.getProperties();
-      for (String propName : collectionProps.keySet()) {
-        if (propName.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
-          collectionParams.put(propName.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), (String) collectionProps.get(propName));
-        }
-      }
-      
-      createCollectionZkNode(stateManager, collectionName, collectionParams);
-      
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
-
-      // wait for a while until we don't see the collection
-      TimeOut waitUntil = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-      boolean created = false;
-      while (! waitUntil.hasTimedOut()) {
-        waitUntil.sleep(100);
-        created = ocmh.cloudManager.getClusterStateProvider().getClusterState().hasCollection(collectionName);
-        if(created) break;
-      }
-      if (!created)
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not fully create collection: " + collectionName);
-
-      if (nodeList.isEmpty()) {
-        log.debug("Finished create command for collection: {}", collectionName);
-        return;
-      }
-
-      // For tracking async calls.
-      Map<String, String> requestMap = new HashMap<>();
-
-
-      log.debug(formatString("Creating SolrCores for new collection {0}, shardNames {1} , message : {2}",
-          collectionName, shardNames, message));
-      Map<String,ShardRequest> coresToCreate = new LinkedHashMap<>();
-      ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-      for (ReplicaPosition replicaPosition : replicaPositions) {
-        String nodeName = replicaPosition.node;
-        String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(),
-            ocmh.cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName),
-            replicaPosition.shard, replicaPosition.type, true);
-        log.debug(formatString("Creating core {0} as part of shard {1} of collection {2} on {3}"
-            , coreName, replicaPosition.shard, collectionName, nodeName));
-
-
-        String baseUrl = zkStateReader.getBaseUrlForNodeName(nodeName);
-        //in the new mode, create the replica in clusterstate prior to creating the core.
-        // Otherwise the core creation fails
-        if (!isLegacyCloud) {
-          ZkNodeProps props = new ZkNodeProps(
-              Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
-              ZkStateReader.COLLECTION_PROP, collectionName,
-              ZkStateReader.SHARD_ID_PROP, replicaPosition.shard,
-              ZkStateReader.CORE_NAME_PROP, coreName,
-              ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-              ZkStateReader.BASE_URL_PROP, baseUrl,
-              ZkStateReader.REPLICA_TYPE, replicaPosition.type.name(),
-              CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-          Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
-        }
-
-        // Need to create new params for each request
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
-
-        params.set(CoreAdminParams.NAME, coreName);
-        params.set(COLL_CONF, configName);
-        params.set(CoreAdminParams.COLLECTION, collectionName);
-        params.set(CoreAdminParams.SHARD, replicaPosition.shard);
-        params.set(ZkStateReader.NUM_SHARDS_PROP, shardNames.size());
-        params.set(CoreAdminParams.NEW_COLLECTION, "true");
-        params.set(CoreAdminParams.REPLICA_TYPE, replicaPosition.type.name());
-
-        if (async != null) {
-          String coreAdminAsyncId = async + Math.abs(System.nanoTime());
-          params.add(ASYNC, coreAdminAsyncId);
-          requestMap.put(nodeName, coreAdminAsyncId);
-        }
-        ocmh.addPropertyParams(message, params);
-
-        ShardRequest sreq = new ShardRequest();
-        sreq.nodeName = nodeName;
-        params.set("qt", ocmh.adminPath);
-        sreq.purpose = 1;
-        sreq.shards = new String[]{baseUrl};
-        sreq.actualShards = sreq.shards;
-        sreq.params = params;
-
-        if (isLegacyCloud) {
-          shardHandler.submit(sreq, sreq.shards[0], sreq.params);
-        } else {
-          coresToCreate.put(coreName, sreq);
-        }
-      }
-
-      if(!isLegacyCloud) {
-        // wait for all replica entries to be created
-        Map<String, Replica> replicas = ocmh.waitToSeeReplicasInState(collectionName, coresToCreate.keySet());
-        for (Map.Entry<String, ShardRequest> e : coresToCreate.entrySet()) {
-          ShardRequest sreq = e.getValue();
-          sreq.params.set(CoreAdminParams.CORE_NODE_NAME, replicas.get(e.getKey()).getName());
-          shardHandler.submit(sreq, sreq.shards[0], sreq.params);
-        }
-      }
-
-      ocmh.processResponses(results, shardHandler, false, null, async, requestMap, Collections.emptySet());
-      if(results.get("failure") != null && ((SimpleOrderedMap)results.get("failure")).size() > 0) {
-        // Let's cleanup as we hit an exception
-        // We shouldn't be passing 'results' here for the cleanup as the response would then contain 'success'
-        // element, which may be interpreted by the user as a positive ack
-        ocmh.cleanupCollection(collectionName, new NamedList());
-        log.info("Cleaned up artifacts for failed create collection for [{}]", collectionName);
-      } else {
-        log.debug("Finished create command on all shards for collection: {}", collectionName);
-
-        // Emit a warning about production use of data driven functionality
-        boolean defaultConfigSetUsed = message.getStr(COLL_CONF) == null ||
-            message.getStr(COLL_CONF).equals(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
-        if (defaultConfigSetUsed) {
-          results.add("warning", "Using _default configset. Data driven schema functionality"
-              + " is enabled by default, which is NOT RECOMMENDED for production use. To turn it off:"
-              + " curl http://{host:port}/solr/" + collectionName + "/config -d '{\"set-user-property\": {\"update.autoCreateFields\":\"false\"}}'");
-        }
-      }
-    } catch (SolrException ex) {
-      throw ex;
-    } catch (Exception ex) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, ex);
-    } finally {
-      if (sessionWrapper.get() != null) sessionWrapper.get().release();
-    }
-  }
-
-  public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
-                                                            ZkNodeProps message,
-                                                            List<String> nodeList, List<String> shardNames,
-                                                            AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
-    final String collectionName = message.getStr(NAME);
-    // look at the replication factor and see if it matches reality
-    // if it does not, find best nodes to create more cores
-    int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
-    int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas>0?0:1));
-    int numPullReplicas = message.getInt(PULL_REPLICAS, 0);
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    String policy = message.getStr(Policy.POLICY);
-    boolean usePolicyFramework = !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty() || policy != null;
-
-    Integer numSlices = message.getInt(NUM_SLICES, null);
-    String router = message.getStr("router.name", DocRouter.DEFAULT_NAME);
-    if(ImplicitDocRouter.NAME.equals(router)){
-      ClusterStateMutator.getShardNames(shardNames, message.getStr("shards", null));
-      numSlices = shardNames.size();
-    } else {
-      if (numSlices == null ) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NUM_SLICES + " is a required param (when using CompositeId router).");
-      }
-      if (numSlices <= 0) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NUM_SLICES + " must be > 0");
-      }
-      ClusterStateMutator.getShardNames(numSlices, shardNames);
-    }
-
-    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, 1);
-    if (usePolicyFramework && message.getStr(MAX_SHARDS_PER_NODE) != null && maxShardsPerNode > 0) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "'maxShardsPerNode>0' is not supported when autoScaling policies are used");
-    }
-    if (maxShardsPerNode == -1 || usePolicyFramework) maxShardsPerNode = Integer.MAX_VALUE;
-    if (numNrtReplicas + numTlogReplicas <= 0) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
-    }
-
-    // we need to look at every node and see how many cores it serves
-    // add our new cores to existing nodes serving the least number of cores
-    // but (for now) require that each core goes on a distinct node.
-
-    List<ReplicaPosition> replicaPositions;
-    nodeList.addAll(Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, RANDOM));
-    if (nodeList.isEmpty()) {
-      log.warn("It is unusual to create a collection ("+collectionName+") without cores.");
-
-      replicaPositions = new ArrayList<>();
-    } else {
-      int totalNumReplicas = numNrtReplicas + numTlogReplicas + numPullReplicas;
-      if (totalNumReplicas > nodeList.size()) {
-        log.warn("Specified number of replicas of "
-            + totalNumReplicas
-            + " on collection "
-            + collectionName
-            + " is higher than the number of Solr instances currently live or live and part of your " + CREATE_NODE_SET + "("
-            + nodeList.size()
-            + "). It's unusual to run two replica of the same slice on the same Solr-instance.");
-      }
-
-      int maxShardsAllowedToCreate = maxShardsPerNode == Integer.MAX_VALUE ?
-          Integer.MAX_VALUE :
-          maxShardsPerNode * nodeList.size();
-      int requestedShardsToCreate = numSlices * totalNumReplicas;
-      if (maxShardsAllowedToCreate < requestedShardsToCreate) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName + ". Value of "
-            + MAX_SHARDS_PER_NODE + " is " + maxShardsPerNode
-            + ", and the number of nodes currently live or live and part of your "+CREATE_NODE_SET+" is " + nodeList.size()
-            + ". This allows a maximum of " + maxShardsAllowedToCreate
-            + " to be created. Value of " + NUM_SLICES + " is " + numSlices
-            + ", value of " + NRT_REPLICAS + " is " + numNrtReplicas
-            + ", value of " + TLOG_REPLICAS + " is " + numTlogReplicas
-            + " and value of " + PULL_REPLICAS + " is " + numPullReplicas
-            + ". This requires " + requestedShardsToCreate
-            + " shards to be created (higher than the allowed number)");
-      }
-      replicaPositions = Assign.identifyNodes(cloudManager
-          , clusterState, nodeList, collectionName, message, shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas);
-      sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
-    }
-    return replicaPositions;
-  }
-
-  String getConfigName(String coll, ZkNodeProps message) throws KeeperException, InterruptedException {
-    String configName = message.getStr(COLL_CONF);
-
-    if (configName == null) {
-      // if there is only one conf, use that
-      List<String> configNames = null;
-      try {
-        configNames = ocmh.zkStateReader.getZkClient().getChildren(ZkConfigManager.CONFIGS_ZKNODE, null, true);
-        if (configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
-          if (!CollectionAdminParams.SYSTEM_COLL.equals(coll)) {
-            copyDefaultConfigSetTo(configNames, coll);
-          }
-          return coll;
-        } else if (configNames != null && configNames.size() == 1) {
-          configName = configNames.get(0);
-          // no config set named, but there is only 1 - use it
-          log.info("Only one config set found in zk - using it:" + configName);
-        }
-      } catch (KeeperException.NoNodeException e) {
-
-      }
-    }
-    return "".equals(configName)? null: configName;
-  }
-  
-  /**
-   * Copies the _default configset to the specified configset name (overwrites if pre-existing)
-   */
-  private void copyDefaultConfigSetTo(List<String> configNames, String targetConfig) {
-    ZkConfigManager configManager = new ZkConfigManager(ocmh.zkStateReader.getZkClient());
-
-    // if a configset named coll exists, delete the configset so that _default can be copied over
-    if (configNames.contains(targetConfig)) {
-      log.info("There exists a configset by the same name as the collection we're trying to create: " + targetConfig +
-          ", deleting it so that we can copy the _default configs over and create the collection.");
-      try {
-        configManager.deleteConfigDir(targetConfig);
-      } catch (Exception e) {
-        throw new SolrException(ErrorCode.INVALID_STATE, "Error while deleting configset: " + targetConfig, e);
-      }
-    } else {
-      log.info("Only _default config set found, using it.");
-    }
-    // Copy _default into targetConfig
-    try {
-      configManager.copyConfigDir(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME, targetConfig, new HashSet<>());
-    } catch (Exception e) {
-      throw new SolrException(ErrorCode.INVALID_STATE, "Error while copying _default to " + targetConfig, e);
-    }
-  }
-
-  public static void createCollectionZkNode(DistribStateManager stateManager, String collection, Map<String,String> params) {
-    log.debug("Check for collection zkNode:" + collection);
-    String collectionPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
-
-    try {
-      if (!stateManager.hasData(collectionPath)) {
-        log.debug("Creating collection in ZooKeeper:" + collection);
-
-        try {
-          Map<String,Object> collectionProps = new HashMap<>();
-
-          // TODO: if collection.configName isn't set, and there isn't already a conf in zk, just use that?
-          String defaultConfigName = System.getProperty(ZkController.COLLECTION_PARAM_PREFIX + ZkController.CONFIGNAME_PROP, collection);
-
-          if (params.size() > 0) {
-            collectionProps.putAll(params);
-            // if the config name wasn't passed in, use the default
-            if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP)) {
-              // users can create the collection node and conf link ahead of time, or this may return another option
-              getConfName(stateManager, collection, collectionPath, collectionProps);
-            }
-
-          } else if (System.getProperty("bootstrap_confdir") != null) {
-            // if we are bootstrapping a collection, default the config for
-            // a new collection to the collection we are bootstrapping
-            log.info("Setting config for collection:" + collection + " to " + defaultConfigName);
-
-            Properties sysProps = System.getProperties();
-            for (String sprop : System.getProperties().stringPropertyNames()) {
-              if (sprop.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
-                collectionProps.put(sprop.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), sysProps.getProperty(sprop));
-              }
-            }
-
-            // if the config name wasn't passed in, use the default
-            if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP))
-              collectionProps.put(ZkController.CONFIGNAME_PROP, defaultConfigName);
-
-          } else if (Boolean.getBoolean("bootstrap_conf")) {
-            // the conf name should should be the collection name of this core
-            collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
-          } else {
-            getConfName(stateManager, collection, collectionPath, collectionProps);
-          }
-
-          collectionProps.remove(ZkStateReader.NUM_SHARDS_PROP);  // we don't put numShards in the collections properties
-
-          ZkNodeProps zkProps = new ZkNodeProps(collectionProps);
-          stateManager.makePath(collectionPath, Utils.toJSON(zkProps), CreateMode.PERSISTENT, false);
-
-        } catch (KeeperException e) {
-          // it's okay if the node already exists
-          if (e.code() != KeeperException.Code.NODEEXISTS) {
-            throw e;
-          }
-        } catch (AlreadyExistsException e) {
-          // it's okay if the node already exists
-        }
-      } else {
-        log.debug("Collection zkNode exists");
-      }
-
-    } catch (KeeperException e) {
-      // it's okay if another beats us creating the node
-      if (e.code() == KeeperException.Code.NODEEXISTS) {
-        return;
-      }
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
-    } catch (IOException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
-    } catch (InterruptedException e) {
-      Thread.interrupted();
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
-    }
-
-  }
-  
-  private static void getConfName(DistribStateManager stateManager, String collection, String collectionPath, Map<String,Object> collectionProps) throws IOException,
-      KeeperException, InterruptedException {
-    // check for configName
-    log.debug("Looking for collection configName");
-    if (collectionProps.containsKey("configName")) {
-      log.info("configName was passed as a param {}", collectionProps.get("configName"));
-      return;
-    }
-
-    List<String> configNames = null;
-    int retry = 1;
-    int retryLimit = 6;
-    for (; retry < retryLimit; retry++) {
-      if (stateManager.hasData(collectionPath)) {
-        VersionedData data = stateManager.getData(collectionPath);
-        ZkNodeProps cProps = ZkNodeProps.load(data.getData());
-        if (cProps.containsKey(ZkController.CONFIGNAME_PROP)) {
-          break;
-        }
-      }
-
-      try {
-        configNames = stateManager.listData(ZkConfigManager.CONFIGS_ZKNODE);
-      } catch (NoSuchElementException | NoNodeException e) {
-        // just keep trying
-      }
-
-      // check if there's a config set with the same name as the collection
-      if (configNames != null && configNames.contains(collection)) {
-        log.info(
-            "Could not find explicit collection configName, but found config name matching collection name - using that set.");
-        collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
-        break;
-      }
-      // if _default exists, use that
-      if (configNames != null && configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
-        log.info(
-            "Could not find explicit collection configName, but found _default config set - using that set.");
-        collectionProps.put(ZkController.CONFIGNAME_PROP, ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
-        break;
-      }
-      // if there is only one conf, use that
-      if (configNames != null && configNames.size() == 1) {
-        // no config set named, but there is only 1 - use it
-        log.info("Only one config set found in zk - using it:" + configNames.get(0));
-        collectionProps.put(ZkController.CONFIGNAME_PROP, configNames.get(0));
-        break;
-      }
-
-      log.info("Could not find collection configName - pausing for 3 seconds and trying again - try: " + retry);
-      Thread.sleep(3000);
-    }
-    if (retry == retryLimit) {
-      log.error("Could not find configName for collection " + collection);
-      throw new ZooKeeperException(
-          SolrException.ErrorCode.SERVER_ERROR,
-          "Could not find configName for collection " + collection + " found:" + configNames);
-    }
-  }
-}
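
A side note on the getConfName() retry loop above: it is a bounded retry with a fixed precedence order (explicit config name, then a config set named after the collection, then "_default", then a sole remaining config set). A minimal standalone sketch of that pattern, assuming plain JDK types only; resolveConfigName and the listConfigs supplier are illustrative names, not Solr API:

import java.util.List;
import java.util.Optional;
import java.util.function.Supplier;

public class ConfigNameResolver {
  private static final int RETRY_LIMIT = 6;
  private static final long PAUSE_MS = 3000;

  // Precedence mirrors getConfName(): a config set named after the collection
  // wins, then "_default", then a sole remaining config set.
  static Optional<String> resolveConfigName(String collection,
                                            Supplier<List<String>> listConfigs)
      throws InterruptedException {
    for (int retry = 1; retry < RETRY_LIMIT; retry++) {
      List<String> names = listConfigs.get();
      if (names != null) {
        if (names.contains(collection)) return Optional.of(collection);
        if (names.contains("_default")) return Optional.of("_default");
        if (names.size() == 1) return Optional.of(names.get(0));
      }
      Thread.sleep(PAUSE_MS); // pause before the next attempt, as the code above does
    }
    return Optional.empty(); // the caller turns this into an error
  }
}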

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
deleted file mode 100644
index c6afdcc..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.Utils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.Assign.getNodesForNewReplicas;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.RANDOM;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class CreateShardCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public CreateShardCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String sliceName = message.getStr(SHARD_ID_PROP);
-    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
-
-    log.info("Create shard invoked: {}", message);
-    if (collectionName == null || sliceName == null)
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'collection' and 'shard' are required parameters");
-
-    DocCollection collection = clusterState.getCollection(collectionName);
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-    SolrCloseableLatch countDownLatch;
-    try {
-      List<ReplicaPosition> positions = buildReplicaPositions(ocmh.cloudManager, clusterState, collectionName, message, sessionWrapper);
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
-      // wait for a while until we see the shard
-      ocmh.waitForNewShard(collectionName, sliceName);
-
-      String async = message.getStr(ASYNC);
-      countDownLatch = new SolrCloseableLatch(positions.size(), ocmh);
-      for (ReplicaPosition position : positions) {
-        String nodeName = position.node;
-        String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(), collection, sliceName, position.type);
-        log.info("Creating replica " + coreName + " as part of slice " + sliceName + " of collection " + collectionName
-            + " on " + nodeName);
-
-        // Need to create new params for each request
-        ZkNodeProps addReplicasProps = new ZkNodeProps(
-            COLLECTION_PROP, collectionName,
-            SHARD_ID_PROP, sliceName,
-            ZkStateReader.REPLICA_TYPE, position.type.name(),
-            CoreAdminParams.NODE, nodeName,
-            CoreAdminParams.NAME, coreName,
-            CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-        Map<String, Object> propertyParams = new HashMap<>();
-        ocmh.addPropertyParams(message, propertyParams);
-        addReplicasProps = addReplicasProps.plus(propertyParams);
-        if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
-        final NamedList addResult = new NamedList();
-        ocmh.addReplica(zkStateReader.getClusterState(), addReplicasProps, addResult, () -> {
-          countDownLatch.countDown();
-          Object addResultFailure = addResult.get("failure");
-          if (addResultFailure != null) {
-            SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
-            if (failure == null) {
-              failure = new SimpleOrderedMap();
-              results.add("failure", failure);
-            }
-            failure.addAll((NamedList) addResultFailure);
-          } else {
-            SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
-            if (success == null) {
-              success = new SimpleOrderedMap();
-              results.add("success", success);
-            }
-            success.addAll((NamedList) addResult.get("success"));
-          }
-        });
-      }
-    } finally {
-      if (sessionWrapper.get() != null) sessionWrapper.get().release();
-    }
-
-    log.debug("Waiting for create shard action to complete");
-    countDownLatch.await(5, TimeUnit.MINUTES);
-    log.debug("Finished waiting for create shard action to complete");
-
-    log.info("Finished create command on all shards for collection: " + collectionName);
-
-  }
-
-  public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
-         String collectionName, ZkNodeProps message, AtomicReference< PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
-    String sliceName = message.getStr(SHARD_ID_PROP);
-    DocCollection collection = clusterState.getCollection(collectionName);
-
-    int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, collection.getInt(NRT_REPLICAS, collection.getInt(REPLICATION_FACTOR, 1))));
-    int numPullReplicas = message.getInt(PULL_REPLICAS, collection.getInt(PULL_REPLICAS, 0));
-    int numTlogReplicas = message.getInt(TLOG_REPLICAS, collection.getInt(TLOG_REPLICAS, 0));
-    int totalReplicas = numNrtReplicas + numPullReplicas + numTlogReplicas;
-
-    if (numNrtReplicas + numTlogReplicas <= 0) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
-    }
-
-    Object createNodeSetStr = message.get(OverseerCollectionMessageHandler.CREATE_NODE_SET);
-
-    boolean usePolicyFramework = CloudUtil.usePolicyFramework(collection, cloudManager);
-    List<ReplicaPosition> positions;
-    if (usePolicyFramework) {
-      if (collection.getPolicyName() != null) message.getProperties().put(Policy.POLICY, collection.getPolicyName());
-      positions = Assign.identifyNodes(cloudManager,
-          clusterState,
-          Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, RANDOM),
-          collection.getName(),
-          message,
-          Collections.singletonList(sliceName),
-          numNrtReplicas,
-          numTlogReplicas,
-          numPullReplicas);
-      sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
-    } else {
-      List<Assign.ReplicaCount> sortedNodeList = getNodesForNewReplicas(clusterState, collection.getName(), sliceName, totalReplicas,
-          createNodeSetStr, cloudManager);
-      int i = 0;
-      positions = new ArrayList<>();
-      for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
-          Replica.Type.TLOG, numTlogReplicas,
-          Replica.Type.PULL, numPullReplicas
-      ).entrySet()) {
-        for (int j = 0; j < e.getValue(); j++) {
-          positions.add(new ReplicaPosition(sliceName, j + 1, e.getKey(), sortedNodeList.get(i % sortedNodeList.size()).nodeName));
-          i++;
-        }
-      }
-    }
-    return positions;
-  }
-
-}
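
For reference, the non-policy branch of buildReplicaPositions() above reduces to a round-robin assignment over the sorted node list. A self-contained sketch under that assumption; the class and method names here are illustrative, not Solr API:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinPositions {
  // Walk the per-type replica counts in order and place each new replica on
  // the next node, wrapping around with modulo arithmetic.
  static List<String> assign(List<String> sortedNodes, Map<String, Integer> countsByType) {
    List<String> positions = new ArrayList<>();
    int i = 0;
    for (Map.Entry<String, Integer> e : countsByType.entrySet()) {
      for (int j = 0; j < e.getValue(); j++) {
        positions.add(e.getKey() + "-" + (j + 1) + "@" + sortedNodes.get(i % sortedNodes.size()));
        i++;
      }
    }
    return positions;
  }

  public static void main(String[] args) {
    Map<String, Integer> counts = new LinkedHashMap<>();
    counts.put("NRT", 2);
    counts.put("TLOG", 1);
    System.out.println(assign(List.of("node1", "node2"), counts));
    // prints [NRT-1@node1, NRT-2@node2, TLOG-1@node1]
  }
}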

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CreateSnapshotCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateSnapshotCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateSnapshotCmd.java
deleted file mode 100644
index 5de65a4..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CreateSnapshotCmd.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.Replica.State;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the functionality of creating a collection level snapshot.
- */
-public class CreateSnapshotCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public CreateSnapshotCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String commitName = message.getStr(CoreAdminParams.COMMIT_NAME);
-    String asyncId = message.getStr(ASYNC);
-    SolrZkClient zkClient = this.ocmh.overseer.getZkController().getZkClient();
-    Date creationDate = new Date();
-
-    if (SolrSnapshotManager.snapshotExists(zkClient, collectionName, commitName)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName
-          + " already exists for collection " + collectionName);
-    }
-
-    log.info("Creating a snapshot for collection={} with commitName={}", collectionName, commitName);
-
-    // Create a node in ZK to store the collection level snapshot meta-data.
-    SolrSnapshotManager.createCollectionLevelSnapshot(zkClient, collectionName, new CollectionSnapshotMetaData(commitName));
-    log.info("Created a ZK path to store snapshot information for collection={} with commitName={}", collectionName, commitName);
-
-    Map<String, String> requestMap = new HashMap<>();
-    NamedList shardRequestResults = new NamedList();
-    Map<String, Slice> shardByCoreName = new HashMap<>();
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-
-    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
-      for (Replica replica : slice.getReplicas()) {
-        if (replica.getState() != State.ACTIVE) {
-          log.info("Replica {} is not active. Hence not sending the createsnapshot request", replica.getCoreName());
-          continue; // Since replica is not active - no point sending a request.
-        }
-
-        String coreName = replica.getStr(CORE_NAME_PROP);
-
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminAction.CREATESNAPSHOT.toString());
-        params.set(NAME, slice.getName());
-        params.set(CORE_NAME_PROP, coreName);
-        params.set(CoreAdminParams.COMMIT_NAME, commitName);
-
-        ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
-        log.debug("Sent createsnapshot request to core={} with commitName={}", coreName, commitName);
-
-        shardByCoreName.put(coreName, slice);
-      }
-    }
-
-    // At this point we want to make sure that at least one replica for every shard
-    // is able to create the snapshot. If that is not the case, then we fail the request.
-    // This takes care of the situation where e.g. an entire shard is unavailable.
-    Set<String> failedShards = new HashSet<>();
-
-    ocmh.processResponses(shardRequestResults, shardHandler, false, null, asyncId, requestMap);
-    NamedList success = (NamedList) shardRequestResults.get("success");
-    List<CoreSnapshotMetaData> replicas = new ArrayList<>();
-    if (success != null) {
-      for (int i = 0; i < success.size(); i++) {
-        NamedList resp = (NamedList)success.getVal(i);
-
-        // Check if this core is the leader for the shard. The idea here is that during the backup
-        // operation we prefer the snapshot of the "leader" replica, since it is most likely
-        // to have the latest state.
-        String coreName = (String)resp.get(CoreAdminParams.CORE);
-        Slice slice = shardByCoreName.remove(coreName);
-        boolean leader = (slice.getLeader() != null && slice.getLeader().getCoreName().equals(coreName));
-        resp.add(SolrSnapshotManager.SHARD_ID, slice.getName());
-        resp.add(SolrSnapshotManager.LEADER, leader);
-
-        CoreSnapshotMetaData c = new CoreSnapshotMetaData(resp);
-        replicas.add(c);
-        log.info("Snapshot with commitName {} is created successfully for core {}", commitName, c.getCoreName());
-      }
-    }
-
-    if (!shardByCoreName.isEmpty()) { // One or more failures.
-      log.warn("Unable to create a snapshot with name {} for following cores {}", commitName, shardByCoreName.keySet());
-
-      // Count number of failures per shard.
-      Map<String, Integer> failuresByShardId = new HashMap<>();
-      for (Map.Entry<String,Slice> entry : shardByCoreName.entrySet()) {
-        int f = 0;
-        if (failuresByShardId.get(entry.getValue().getName()) != null) {
-          f = failuresByShardId.get(entry.getValue().getName());
-        }
-        failuresByShardId.put(entry.getValue().getName(), f + 1);
-      }
-
-      // Now that we know the number of failures per shard, we can figure out
-      // whether at least one replica per shard was able to create a snapshot.
-      DocCollection collectionStatus = ocmh.zkStateReader.getClusterState().getCollection(collectionName);
-      for (Map.Entry<String,Integer> entry : failuresByShardId.entrySet()) {
-        int replicaCount = collectionStatus.getSlice(entry.getKey()).getReplicas().size();
-        if (replicaCount <= entry.getValue()) {
-          failedShards.add(entry.getKey());
-        }
-      }
-    }
-
-    if (failedShards.isEmpty()) { // No failures.
-      CollectionSnapshotMetaData meta = new CollectionSnapshotMetaData(commitName, SnapshotStatus.Successful, creationDate, replicas);
-      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, meta);
-      log.info("Saved following snapshot information for collection={} with commitName={} in Zookeeper : {}", collectionName,
-          commitName, meta.toNamedList());
-    } else {
-      log.warn("Failed to create a snapshot for collection {} with commitName = {}. Snapshot could not be captured for following shards {}",
-          collectionName, commitName, failedShards);
-      // Update the ZK meta-data to include only cores with the snapshot. This will enable users to figure out
-      // which cores have the named snapshot.
-      CollectionSnapshotMetaData meta = new CollectionSnapshotMetaData(commitName, SnapshotStatus.Failed, creationDate, replicas);
-      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, meta);
-      log.info("Saved following snapshot information for collection={} with commitName={} in Zookeeper : {}", collectionName,
-          commitName, meta.toNamedList());
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to create snapshot on shards " + failedShards);
-    }
-  }
-}
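
The failure accounting in CreateSnapshotCmd boils down to: count failures per shard, then flag a shard only when the failures account for every one of its replicas. A standalone sketch of just that bookkeeping, with plain maps standing in for the Slice/Replica structures (names are illustrative):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class SnapshotFailureCheck {
  // failedCoreToShard maps each core that failed to its shard id;
  // replicaCountByShard is the total replica count per shard.
  static Set<String> failedShards(Map<String, String> failedCoreToShard,
                                  Map<String, Integer> replicaCountByShard) {
    Map<String, Integer> failuresByShard = new HashMap<>();
    for (String shard : failedCoreToShard.values()) {
      failuresByShard.merge(shard, 1, Integer::sum); // count failures per shard
    }
    Set<String> failed = new HashSet<>();
    for (Map.Entry<String, Integer> e : failuresByShard.entrySet()) {
      // a shard fails only if no replica at all produced a snapshot
      if (replicaCountByShard.getOrDefault(e.getKey(), 0) <= e.getValue()) {
        failed.add(e.getKey());
      }
    }
    return failed;
  }
}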

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/DeleteAliasCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteAliasCmd.java
deleted file mode 100644
index 9c9f1c6..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteAliasCmd.java
+++ /dev/null
@@ -1,43 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class DeleteAliasCmd implements OverseerCollectionMessageHandler.Cmd {
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public DeleteAliasCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String aliasName = message.getStr(NAME);
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    zkStateReader.aliasesHolder.applyModificationAndExportToZk(a -> a.cloneWithCollectionAlias(aliasName, null));
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java
deleted file mode 100644
index 8ee0168..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java
+++ /dev/null
@@ -1,141 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.common.NonExistentCoreException;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Aliases;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class DeleteCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-
-  public DeleteCollectionCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    Aliases aliases = zkStateReader.getAliases();
-    final String collection = message.getStr(NAME);
-    for (Map.Entry<String, List<String>> ent :  aliases.getCollectionAliasListMap().entrySet()) {
-      if (ent.getValue().contains(collection)) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Collection : " + collection + " is part of alias " + ent.getKey() + " remove or modify the alias before removing this collection.");
-      }
-    }
-
-    try {
-      // Remove the snapshots meta-data for this collection in ZK. Deleting actual index files
-      // should be taken care of as part of collection delete operation.
-      SolrZkClient zkClient = zkStateReader.getZkClient();
-      SolrSnapshotManager.cleanupCollectionLevelSnapshots(zkClient, collection);
-
-      if (zkStateReader.getClusterState().getCollectionOrNull(collection) == null) {
-        if (zkStateReader.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, true)) {
-          // if the collection is not in the clusterstate but is listed in zk, do nothing; it will
-          // be removed in the finally block. We cannot continue, because the code below errors out
-          // if the collection is not in the clusterstate
-          return;
-        }
-      }
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.UNLOAD.toString());
-      params.set(CoreAdminParams.DELETE_INSTANCE_DIR, true);
-      params.set(CoreAdminParams.DELETE_DATA_DIR, true);
-
-      String asyncId = message.getStr(ASYNC);
-      Map<String, String> requestMap = null;
-      if (asyncId != null) {
-        requestMap = new HashMap<>();
-      }
-
-      Set<String> okayExceptions = new HashSet<>(1);
-      okayExceptions.add(NonExistentCoreException.class.getName());
-
-      ocmh.collectionCmd(message, params, results, null, asyncId, requestMap, okayExceptions);
-
-      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETE.toLower(), NAME, collection);
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-
-      // wait for a while until we don't see the collection
-      TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-      boolean removed = false;
-      while (! timeout.hasTimedOut()) {
-        timeout.sleep(100);
-        removed = !zkStateReader.getClusterState().hasCollection(collection);
-        if (removed) {
-          timeout.sleep(500); // linger briefly so it's more likely other readers see the removal on return
-          break;
-        }
-      }
-      if (!removed) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Could not fully remove collection: " + collection);
-      }
-
-    } finally {
-
-      try {
-        if (zkStateReader.getZkClient().exists(
-            ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, true)) {
-          zkStateReader.getZkClient().clean(
-              ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection);
-        }
-      } catch (InterruptedException e) {
-        SolrException.log(log, "Cleaning up collection in zk was interrupted:"
-            + collection, e);
-        Thread.currentThread().interrupt();
-      } catch (KeeperException e) {
-        SolrException.log(log, "Problem cleaning up collection in zk:"
-            + collection, e);
-      }
-    }
-  }
-}
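
The 30-second wait at the end of DeleteCollectionCmd is a plain poll-until-gone loop. A minimal sketch of the same shape with JDK types only; waitUntilRemoved is an illustrative name, and the 100ms/500ms values simply copy the code above:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public class PollUntilGone {
  // Poll the condition every 100ms until the deadline; once it flips, linger
  // briefly so concurrent readers are likely to observe the new state too.
  static boolean waitUntilRemoved(BooleanSupplier removed, long timeoutMs)
      throws InterruptedException {
    long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
    while (System.nanoTime() < deadline) {
      Thread.sleep(100);
      if (removed.getAsBoolean()) {
        Thread.sleep(500); // grace period for other readers
        return true;
      }
    }
    return false; // the caller raises a SERVER_ERROR in this case
  }
}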

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
deleted file mode 100644
index 51b0956..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Locale;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class DeleteNodeCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public DeleteNodeCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ocmh.checkRequired(message, "node");
-    String node = message.getStr("node");
-    if (!state.liveNodesContain(node)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + node + " is not live");
-    }
-    List<ZkNodeProps> sourceReplicas = ReplaceNodeCmd.getReplicasOfNode(node, state);
-    List<String> singleReplicas = verifyReplicaAvailability(sourceReplicas, state);
-    if (!singleReplicas.isEmpty()) {
-      results.add("failure", "Can't delete the only existing non-PULL replica(s) on node " + node + ": " + singleReplicas.toString());
-    } else {
-      cleanupReplicas(results, state, sourceReplicas, ocmh, node, message.getStr(ASYNC));
-    }
-  }
-
-  // collect names of replicas that cannot be deleted
-  static List<String> verifyReplicaAvailability(List<ZkNodeProps> sourceReplicas, ClusterState state) {
-    List<String> res = new ArrayList<>();
-    for (ZkNodeProps sourceReplica : sourceReplicas) {
-      String coll = sourceReplica.getStr(COLLECTION_PROP);
-      String shard = sourceReplica.getStr(SHARD_ID_PROP);
-      String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
-      DocCollection collection = state.getCollection(coll);
-      Slice slice = collection.getSlice(shard);
-      if (slice.getReplicas().size() < 2) {
-        // can't delete the only replica in existence
-        res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
-      } else { // check replica types
-        int otherNonPullReplicas = 0;
-        for (Replica r : slice.getReplicas()) {
-          if (!r.getName().equals(replicaName) && !r.getType().equals(Replica.Type.PULL)) {
-            otherNonPullReplicas++;
-          }
-        }
-        // can't delete - there are no other non-pull replicas
-        if (otherNonPullReplicas == 0) {
-          res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
-        }
-      }
-    }
-    return res;
-  }
-
-  static void cleanupReplicas(NamedList results,
-                              ClusterState clusterState,
-                              List<ZkNodeProps> sourceReplicas,
-                              OverseerCollectionMessageHandler ocmh,
-                              String node,
-                              String async) throws InterruptedException {
-    CountDownLatch cleanupLatch = new CountDownLatch(sourceReplicas.size());
-    for (ZkNodeProps sourceReplica : sourceReplicas) {
-      String coll = sourceReplica.getStr(COLLECTION_PROP);
-      String shard = sourceReplica.getStr(SHARD_ID_PROP);
-      String type = sourceReplica.getStr(ZkStateReader.REPLICA_TYPE);
-      log.info("Deleting replica type={} for collection={} shard={} on node={}", type, coll, shard, node);
-      NamedList deleteResult = new NamedList();
-      try {
-        if (async != null) sourceReplica = sourceReplica.plus(ASYNC, async);
-        ((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, sourceReplica.plus("parallel", "true"), deleteResult, () -> {
-          cleanupLatch.countDown();
-          if (deleteResult.get("failure") != null) {
-            synchronized (results) {
-
-              results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
-                  " on node=%s", coll, shard, node));
-            }
-          }
-        });
-      } catch (KeeperException e) {
-        log.warn("Error deleting ", e);
-        cleanupLatch.countDown();
-      } catch (Exception e) {
-        log.warn("Error deleting ", e);
-        cleanupLatch.countDown();
-        throw e;
-      }
-    }
-    log.debug("Waiting for delete node action to complete");
-    cleanupLatch.await(5, TimeUnit.MINUTES);
-  }
-
-
-}
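
cleanupReplicas() above is a fan-out/fan-in over asynchronous deletes, with the latch also counted down on a failed submit so the await cannot hang on a replica that was never dispatched. A stripped-down sketch of that shape; asyncDelete is an illustrative callback, not Solr API:

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;

public class ParallelCleanup {
  // Fire one async delete per replica; each completion callback counts the
  // latch down, and the batch as a whole gets a bounded wait.
  static void deleteAll(List<String> replicas,
                        BiConsumer<String, Runnable> asyncDelete)
      throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(replicas.size());
    for (String replica : replicas) {
      try {
        asyncDelete.accept(replica, latch::countDown); // invoked when the delete finishes
      } catch (RuntimeException e) {
        latch.countDown(); // a failed submit must still release the latch
        throw e;
      }
    }
    latch.await(5, TimeUnit.MINUTES); // bounded, like the code above
  }
}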

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java
deleted file mode 100644
index e71d7e8..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.ONLY_IF_DOWN;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-
-public class DeleteReplicaCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public DeleteReplicaCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    deleteReplica(clusterState, message, results, null);
-  }
-
-
-  @SuppressWarnings("unchecked")
-  void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-          throws KeeperException, InterruptedException {
-    log.debug("deleteReplica() : {}", Utils.toJSONString(message));
-    boolean parallel = message.getBool("parallel", false);
-
-    // If a count is specified, the strategy needs to be different
-    if (message.getStr(COUNT_PROP) != null) {
-      deleteReplicaBasedOnCount(clusterState, message, results, onComplete, parallel);
-      return;
-    }
-
-
-    ocmh.checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP);
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String shard = message.getStr(SHARD_ID_PROP);
-    String replicaName = message.getStr(REPLICA_PROP);
-
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Slice slice = coll.getSlice(shard);
-    if (slice == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Invalid shard name : " +  shard + " in collection : " +  collectionName);
-    }
-
-    deleteCore(slice, collectionName, replicaName, message, shard, results, onComplete,  parallel);
-
-  }
-
-
-  /**
-   * Delete replicas based on count for a given collection. If a shard is passed, deletes
-   * replicas only from that shard; otherwise deletes the given number of replicas from
-   * every shard of the collection.
-   */
-  void deleteReplicaBasedOnCount(ClusterState clusterState,
-                                 ZkNodeProps message,
-                                 NamedList results,
-                                 Runnable onComplete,
-                                 boolean parallel)
-          throws KeeperException, InterruptedException {
-    ocmh.checkRequired(message, COLLECTION_PROP, COUNT_PROP);
-    int count = Integer.parseInt(message.getStr(COUNT_PROP));
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String shard = message.getStr(SHARD_ID_PROP);
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Slice slice = null;
-    // Validate the shard if one was passed.
-    if (shard != null) {
-      slice = coll.getSlice(shard);
-      if (slice == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                "Invalid shard name : " +  shard +  " in collection : " + collectionName);
-      }
-    }
-
-    Map<Slice, Set<String>> shardToReplicasMapping = new HashMap<>();
-    if (slice != null) {
-      Set<String> replicasToBeDeleted = pickReplicasToBeDeleted(slice, shard, collectionName, count);
-      shardToReplicasMapping.put(slice,replicasToBeDeleted);
-    } else {
-
-      // No shard was specified, so delete the requested number of replicas from every shard of the collection.
-      Collection<Slice> allSlices = coll.getSlices();
-      for (Slice individualSlice : allSlices) {
-        Set<String> replicasToBeDeleted = pickReplicasToBeDeleted(individualSlice, individualSlice.getName(), collectionName, count);
-        shardToReplicasMapping.put(individualSlice, replicasToBeDeleted);
-      }
-    }
-
-    for (Slice shardSlice: shardToReplicasMapping.keySet()) {
-      String shardId = shardSlice.getName();
-      Set<String> replicas = shardToReplicasMapping.get(shardSlice);
-      //callDeleteReplica on all replicas
-      for (String replica: replicas) {
-        log.debug("Deleting replica {}  for shard {} based on count {}", replica, shardId, count);
-        deleteCore(shardSlice, collectionName, replica, message, shard, results, onComplete, parallel);
-      }
-      results.add("shard_id", shardId);
-      results.add("replicas_deleted", replicas);
-    }
-
-  }
-
-
-  /**
-   * Pick replicas to be deleted. Avoid picking the leader.
-   */
-  private Set<String> pickReplicasToBeDeleted(Slice slice, String shard, String collectionName, int count) {
-    validateReplicaAvailability(slice, shard, collectionName, count);
-    Collection<Replica> allReplicas = slice.getReplicas();
-    Set<String> replicasToBeRemoved = new HashSet<>();
-    Replica leader = slice.getLeader();
-    for (Replica replica: allReplicas) {
-      if (count == 0) {
-        break;
-      }
-      // Avoid picking the leader, to minimize activity on the cluster.
-      if (leader.getCoreName().equals(replica.getCoreName())) {
-        continue;
-      }
-      replicasToBeRemoved.add(replica.getName());
-      count--;
-    }
-    return replicasToBeRemoved;
-  }
-
-  /**
-   * Validate that there are more replicas available than requested to be removed. Also error
-   * out if there is only one replica available.
-   */
-  private void validateReplicaAvailability(Slice slice, String shard, String collectionName, int count) {
-    // If a specific shard was passed, validate whether it has any replicas, or only one, left
-    if (slice != null) {
-      Collection<Replica> allReplicasForShard = slice.getReplicas();
-      if (allReplicasForShard == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No replicas found  in shard/collection: " +
-                shard + "/"  + collectionName);
-      }
-
-
-      if (allReplicasForShard.size() == 1) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There is only one replica available in shard/collection: " +
-                shard + "/" + collectionName + ". Cannot delete that.");
-      }
-
-      if (allReplicasForShard.size() <= count) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There are lesser num replicas requested to be deleted than are available in shard/collection : " +
-                shard + "/"  + collectionName  + " Requested: "  + count + " Available: " + allReplicasForShard.size() + ".");
-      }
-    }
-  }
-
-  void deleteCore(Slice slice, String collectionName, String replicaName, ZkNodeProps message, String shard, NamedList results, Runnable onComplete, boolean parallel) throws KeeperException, InterruptedException {
-
-    Replica replica = slice.getReplica(replicaName);
-    if (replica == null) {
-      ArrayList<String> l = new ArrayList<>();
-      for (Replica r : slice.getReplicas())
-        l.add(r.getName());
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid replica : " +  replicaName + " in shard/collection : " +
-              shard  + "/" + collectionName + " available replicas are " +  StrUtils.join(l, ','));
-    }
-
-    // If users are being safe and only want to remove a replica if it is down, they can specify onlyIfDown=true
-    // on the command.
-    if (Boolean.parseBoolean(message.getStr(ONLY_IF_DOWN)) && replica.getState() != Replica.State.DOWN) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Attempted to remove replica : " + collectionName + "/"  + shard + "/" + replicaName +
-              " with onlyIfDown='true', but state is '" + replica.getStr(ZkStateReader.STATE_PROP) + "'");
-    }
-
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-    String asyncId = message.getStr(ASYNC);
-    AtomicReference<Map<String, String>> requestMap = new AtomicReference<>(null);
-    if (asyncId != null) {
-      requestMap.set(new HashMap<>(1, 1.0f));
-    }
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.UNLOAD.toString());
-    params.add(CoreAdminParams.CORE, core);
-
-    params.set(CoreAdminParams.DELETE_INDEX, message.getBool(CoreAdminParams.DELETE_INDEX, true));
-    params.set(CoreAdminParams.DELETE_INSTANCE_DIR, message.getBool(CoreAdminParams.DELETE_INSTANCE_DIR, true));
-    params.set(CoreAdminParams.DELETE_DATA_DIR, message.getBool(CoreAdminParams.DELETE_DATA_DIR, true));
-
-    boolean isLive = ocmh.zkStateReader.getClusterState().getLiveNodes().contains(replica.getNodeName());
-    if (isLive) {
-      ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap.get());
-    }
-
-    Callable<Boolean> callable = () -> {
-      try {
-        if (isLive) {
-          ocmh.processResponses(results, shardHandler, false, null, asyncId, requestMap.get());
-
-          //check if the core unload removed the corenode zk entry
-          if (ocmh.waitForCoreNodeGone(collectionName, shard, replicaName, 5000)) return Boolean.TRUE;
-        }
-
-        // try and ensure core info is removed from cluster state
-        ocmh.deleteCoreNode(collectionName, replicaName, replica, core);
-        if (ocmh.waitForCoreNodeGone(collectionName, shard, replicaName, 30000)) return Boolean.TRUE;
-        return Boolean.FALSE;
-      } catch (Exception e) {
-        results.add("failure", "Could not complete delete " + e.getMessage());
-        throw e;
-      } finally {
-        if (onComplete != null) onComplete.run();
-      }
-    };
-
-    if (!parallel) {
-      try {
-        if (!callable.call())
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                  "Could not remove replica : " + collectionName + "/" + shard + "/" + replicaName);
-      } catch (InterruptedException | KeeperException e) {
-        throw e;
-      } catch (Exception ex) {
-        throw new SolrException(SolrException.ErrorCode.UNKNOWN, "Error waiting for corenode gone", ex);
-      }
-
-    } else {
-      ocmh.tpe.submit(callable);
-    }
-
-  }
-
-}
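
The selection rule in pickReplicasToBeDeleted() above is simply "take up to count replicas, but never the leader". A standalone sketch of that rule, with plain strings standing in for Replica objects (names are illustrative):

import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.Set;

public class ReplicaPicker {
  // Returns up to `count` replica names, skipping the leader so it keeps
  // serving while the chosen followers are unloaded.
  static Set<String> pick(Collection<String> replicaNames, String leaderName, int count) {
    Set<String> chosen = new LinkedHashSet<>();
    for (String replica : replicaNames) {
      if (chosen.size() == count) break;
      if (replica.equals(leaderName)) continue; // leave the leader alone
      chosen.add(replica);
    }
    return chosen;
  }
}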

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
deleted file mode 100644
index 58c4e63..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
+++ /dev/null
@@ -1,178 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class DeleteShardCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-
-  public DeleteShardCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    String sliceId = message.getStr(ZkStateReader.SHARD_ID_PROP);
-
-    log.info("Delete shard invoked");
-    Slice slice = clusterState.getCollection(collectionName).getSlice(sliceId);
-    if (slice == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-        "No shard with name " + sliceId + " exists for collection " + collectionName);
-
-    // For now, only allow for deletions of Inactive slices or custom hashes (range==null).
-    // TODO: Add check for range gaps on Slice deletion
-    final Slice.State state = slice.getState();
-    if (!(slice.getRange() == null || state == Slice.State.INACTIVE || state == Slice.State.RECOVERY
-        || state == Slice.State.CONSTRUCTION) || state == Slice.State.RECOVERY_FAILED) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The slice: " + slice.getName() + " is currently " + state
-          + ". Only non-active (or custom-hashed) slices can be deleted.");
-    }
-
-    if (state == Slice.State.RECOVERY)  {
-      // mark the slice as 'construction' and only then try to delete the cores
-      // see SOLR-9455
-      DistributedQueue inQueue = Overseer.getStateUpdateQueue(ocmh.zkStateReader.getZkClient());
-      Map<String, Object> propMap = new HashMap<>();
-      propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-      propMap.put(sliceId, Slice.State.CONSTRUCTION.toString());
-      propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-      ZkNodeProps m = new ZkNodeProps(propMap);
-      inQueue.offer(Utils.toJSON(m));
-    }
-
-    String asyncId = message.getStr(ASYNC);
-
-    try {
-      List<ZkNodeProps> replicas = getReplicasForSlice(collectionName, slice);
-      CountDownLatch cleanupLatch = new CountDownLatch(replicas.size());
-      for (ZkNodeProps r : replicas) {
-        final ZkNodeProps replica = r.plus(message.getProperties()).plus("parallel", "true").plus(ASYNC, asyncId);
-        log.info("Deleting replica for collection={} shard={} on node={}", replica.getStr(COLLECTION_PROP), replica.getStr(SHARD_ID_PROP), replica.getStr(CoreAdminParams.NODE));
-        NamedList deleteResult = new NamedList();
-        try {
-          ((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, replica, deleteResult, () -> {
-            cleanupLatch.countDown();
-            if (deleteResult.get("failure") != null) {
-              synchronized (results) {
-                results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
-                    " on node=%s", replica.getStr(COLLECTION_PROP), replica.getStr(SHARD_ID_PROP), replica.getStr(NODE_NAME_PROP)));
-              }
-            }
-            SimpleOrderedMap success = (SimpleOrderedMap) deleteResult.get("success");
-            if (success != null) {
-              synchronized (results)  {
-                results.add("success", success);
-              }
-            }
-          });
-        } catch (KeeperException e) {
-          log.warn("Error deleting replica: " + r, e);
-          cleanupLatch.countDown();
-        } catch (Exception e) {
-          log.warn("Error deleting replica: " + r, e);
-          cleanupLatch.countDown();
-          throw e;
-        }
-      }
-      log.debug("Waiting for delete shard action to complete");
-      cleanupLatch.await(5, TimeUnit.MINUTES);
-
-      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETESHARD.toLower(), ZkStateReader.COLLECTION_PROP,
-          collectionName, ZkStateReader.SHARD_ID_PROP, sliceId);
-      ZkStateReader zkStateReader = ocmh.zkStateReader;
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-
-      // wait for a while until we don't see the shard
-      TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-      boolean removed = false;
-      while (!timeout.hasTimedOut()) {
-        timeout.sleep(100);
-        DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
-        removed = collection.getSlice(sliceId) == null;
-        if (removed) {
-          timeout.sleep(100); // linger briefly so it's more likely other readers see the removal on return
-          break;
-        }
-      }
-      if (!removed) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Could not fully remove collection: " + collectionName + " shard: " + sliceId);
-      }
-
-      log.info("Successfully deleted collection: " + collectionName + ", shard: " + sliceId);
-    } catch (SolrException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Error executing delete operation for collection: " + collectionName + " shard: " + sliceId, e);
-    }
-  }
-
-  private List<ZkNodeProps> getReplicasForSlice(String collectionName, Slice slice) {
-    List<ZkNodeProps> sourceReplicas = new ArrayList<>();
-    for (Replica replica : slice.getReplicas()) {
-      ZkNodeProps props = new ZkNodeProps(
-          COLLECTION_PROP, collectionName,
-          SHARD_ID_PROP, slice.getName(),
-          ZkStateReader.CORE_NAME_PROP, replica.getCoreName(),
-          ZkStateReader.REPLICA_PROP, replica.getName(),
-          CoreAdminParams.NODE, replica.getNodeName());
-      sourceReplicas.add(props);
-    }
-    return sourceReplicas;
-  }
-}
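
The precondition at the top of DeleteShardCmd.call() condenses to the guard below; note that, as written above, a RECOVERY_FAILED slice is rejected even when it has no range. A sketch with a local enum standing in for Slice.State (illustrative, not Solr API):

import java.util.EnumSet;
import java.util.Set;

public class SliceDeleteGuard {
  enum State { ACTIVE, INACTIVE, CONSTRUCTION, RECOVERY, RECOVERY_FAILED }

  private static final Set<State> DELETABLE_STATES =
      EnumSet.of(State.INACTIVE, State.RECOVERY, State.CONSTRUCTION);

  // Custom-hashed slices (no range) or slices in a non-active state may be
  // deleted; the trailing RECOVERY_FAILED clause in the code above rejects
  // that state unconditionally.
  static boolean deletable(boolean hasRange, State state) {
    return (!hasRange || DELETABLE_STATES.contains(state))
        && state != State.RECOVERY_FAILED;
  }
}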