Posted to commits@lucene.apache.org by ab...@apache.org on 2017/04/20 10:20:44 UTC

[11/23] lucene-solr:feature/autoscaling: Squash-merge from master.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/CursorPagingTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/CursorPagingTest.java b/solr/core/src/test/org/apache/solr/CursorPagingTest.java
index b204677..eb1c6bc 100644
--- a/solr/core/src/test/org/apache/solr/CursorPagingTest.java
+++ b/solr/core/src/test/org/apache/solr/CursorPagingTest.java
@@ -19,7 +19,6 @@ package org.apache.solr;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.SentinelIntSet;
 import org.apache.lucene.util.mutable.MutableValueInt;
-import org.apache.solr.core.SolrInfoMBean;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.params.CursorMarkParams;
 import org.apache.solr.common.params.SolrParams;
@@ -32,6 +31,7 @@ import static org.apache.solr.common.params.CursorMarkParams.CURSOR_MARK_START;
 
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.metrics.MetricsMap;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.search.CursorMark; //jdoc
 import org.noggit.ObjectBuilder;
@@ -521,16 +521,16 @@ public class CursorPagingTest extends SolrTestCaseJ4 {
 
     final Collection<String> allFieldNames = getAllSortFieldNames();
 
-    final SolrInfoMBean filterCacheStats 
-      = h.getCore().getInfoRegistry().get("filterCache");
+    final MetricsMap filterCacheStats =
+        (MetricsMap)h.getCore().getCoreMetricManager().getRegistry().getMetrics().get("CACHE.searcher.filterCache");
     assertNotNull(filterCacheStats);
-    final SolrInfoMBean queryCacheStats 
-      = h.getCore().getInfoRegistry().get("queryResultCache");
+    final MetricsMap queryCacheStats =
+        (MetricsMap)h.getCore().getCoreMetricManager().getRegistry().getMetrics().get("CACHE.searcher.queryResultCache");
     assertNotNull(queryCacheStats);
 
-    final long preQcIn = (Long) queryCacheStats.getStatistics().get("inserts");
-    final long preFcIn = (Long) filterCacheStats.getStatistics().get("inserts");
-    final long preFcHits = (Long) filterCacheStats.getStatistics().get("hits");
+    final long preQcIn = (Long) queryCacheStats.getValue().get("inserts");
+    final long preFcIn = (Long) filterCacheStats.getValue().get("inserts");
+    final long preFcHits = (Long) filterCacheStats.getValue().get("hits");
 
     SentinelIntSet ids = assertFullWalkNoDups
       (10, params("q", "*:*",
@@ -542,9 +542,9 @@ public class CursorPagingTest extends SolrTestCaseJ4 {
     
     assertEquals(6, ids.size());
 
-    final long postQcIn = (Long) queryCacheStats.getStatistics().get("inserts");
-    final long postFcIn = (Long) filterCacheStats.getStatistics().get("inserts");
-    final long postFcHits = (Long) filterCacheStats.getStatistics().get("hits");
+    final long postQcIn = (Long) queryCacheStats.getValue().get("inserts");
+    final long postFcIn = (Long) filterCacheStats.getValue().get("inserts");
+    final long postFcHits = (Long) filterCacheStats.getValue().get("hits");
     
     assertEquals("query cache inserts changed", preQcIn, postQcIn);
     // NOTE: use of pure negative filters causes "*:*" to be tracked in filterCache

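Context for the change above: cache statistics moved off SolrInfoMBean.getStatistics() and are now exposed as MetricsMap gauges in the core's Dropwizard metric registry, keyed "CACHE.searcher.<cacheName>". A minimal sketch of the new lookup, assuming a SolrTestCaseJ4 harness 'h' as in the test (MetricsMap is org.apache.solr.metrics.MetricsMap):

    // Fetch the filter cache's stats gauge from the core's metric registry.
    MetricsMap filterCacheStats = (MetricsMap) h.getCore().getCoreMetricManager()
        .getRegistry().getMetrics().get("CACHE.searcher.filterCache");
    // getValue() returns a point-in-time Map snapshot; counters come back as Longs.
    long inserts = (Long) filterCacheStats.getValue().get("inserts");
    long hits = (Long) filterCacheStats.getValue().get("hits");
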
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/SolrInfoBeanTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/SolrInfoBeanTest.java b/solr/core/src/test/org/apache/solr/SolrInfoBeanTest.java
new file mode 100644
index 0000000..d39c87f
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/SolrInfoBeanTest.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr;
+
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.core.SolrInfoBean;
+import org.apache.solr.handler.StandardRequestHandler;
+import org.apache.solr.handler.admin.LukeRequestHandler;
+import org.apache.solr.handler.component.SearchComponent;
+import org.apache.solr.highlight.DefaultSolrHighlighter;
+import org.apache.solr.metrics.SolrMetricManager;
+import org.apache.solr.metrics.SolrMetricProducer;
+import org.apache.solr.search.LRUCache;
+import org.junit.BeforeClass;
+import java.io.File;
+import java.net.URI;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.List;
+
+/**
+ * A simple test used to increase code coverage for some standard things...
+ */
+public class SolrInfoBeanTest extends SolrTestCaseJ4
+{
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("solrconfig.xml","schema.xml");
+  }
+
+  /**
+   * Gets a list of everything we can find in the classpath and makes sure it has
+   * a name, description, etc...
+   */
+  public void testCallMBeanInfo() throws Exception {
+    List<Class> classes = new ArrayList<>();
+    classes.addAll(getClassesForPackage(StandardRequestHandler.class.getPackage().getName()));
+    classes.addAll(getClassesForPackage(SearchComponent.class.getPackage().getName()));
+    classes.addAll(getClassesForPackage(LukeRequestHandler.class.getPackage().getName()));
+    classes.addAll(getClassesForPackage(DefaultSolrHighlighter.class.getPackage().getName()));
+    classes.addAll(getClassesForPackage(LRUCache.class.getPackage().getName()));
+   // System.out.println(classes);
+    
+    int checked = 0;
+    SolrMetricManager metricManager = h.getCoreContainer().getMetricManager();
+    String registry = h.getCore().getCoreMetricManager().getRegistryName();
+    String scope = TestUtil.randomSimpleString(random(), 2, 10);
+    for( Class clazz : classes ) {
+      if( SolrInfoBean.class.isAssignableFrom( clazz ) ) {
+        try {
+          SolrInfoBean info = (SolrInfoBean)clazz.newInstance();
+          if (info instanceof SolrMetricProducer) {
+            ((SolrMetricProducer)info).initializeMetrics(metricManager, registry, scope);
+          }
+          
+          //System.out.println( info.getClass() );
+          assertNotNull( info.getName() );
+          assertNotNull( info.getDescription() );
+          assertNotNull( info.getCategory() );
+          
+          if( info instanceof LRUCache ) {
+            continue;
+          }
+          
+          assertNotNull( info.toString() );
+          checked++;
+        }
+        catch( InstantiationException ex ) {
+          // expected...
+          //System.out.println( "unable to initialize: "+clazz );
+        }
+      }
+    }
+    assertTrue( "there should be at least 10 SolrInfoBean implementations in the classpath, found " + checked, checked > 10 );
+  }
+  
+  private static List<Class> getClassesForPackage(String pckgname) throws Exception {
+    ArrayList<File> directories = new ArrayList<>();
+    ClassLoader cld = h.getCore().getResourceLoader().getClassLoader();
+    String path = pckgname.replace('.', '/');
+    Enumeration<URL> resources = cld.getResources(path);
+    while (resources.hasMoreElements()) {
+      final URI uri = resources.nextElement().toURI();
+      if (!"file".equalsIgnoreCase(uri.getScheme()))
+        continue;
+      final File f = new File(uri);
+      directories.add(f);
+    }
+      
+    ArrayList<Class> classes = new ArrayList<>();
+    for (File directory : directories) {
+      if (directory.exists()) {
+        String[] files = directory.list();
+        for (String file : files) {
+          if (file.endsWith(".class")) {
+             String clazzName = file.substring(0, file.length() - 6);
+             // exclude Test classes that happen to be in these packages.
+             // class.ForName'ing some of them can cause trouble.
+             if (!clazzName.endsWith("Test") && !clazzName.startsWith("Test")) {
+               classes.add(Class.forName(pckgname + '.' + clazzName));
+             }
+          }
+        }
+      }
+    }
+    assertFalse("No classes found in package '"+pckgname+"'; maybe your test classes are packaged as a JAR file?", classes.isEmpty());
+    return classes;
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/SolrInfoMBeanTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/SolrInfoMBeanTest.java b/solr/core/src/test/org/apache/solr/SolrInfoMBeanTest.java
deleted file mode 100644
index bfe2316..0000000
--- a/solr/core/src/test/org/apache/solr/SolrInfoMBeanTest.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr;
-
-import org.apache.solr.core.SolrInfoMBean;
-import org.apache.solr.handler.StandardRequestHandler;
-import org.apache.solr.handler.admin.LukeRequestHandler;
-import org.apache.solr.handler.component.SearchComponent;
-import org.apache.solr.highlight.DefaultSolrHighlighter;
-import org.apache.solr.search.LRUCache;
-import org.junit.BeforeClass;
-import java.io.File;
-import java.net.URI;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.List;
-
-/**
- * A simple test used to increase code coverage for some standard things...
- */
-public class SolrInfoMBeanTest extends SolrTestCaseJ4
-{
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    initCore("solrconfig.xml","schema.xml");
-  }
-
-  /**
-   * Gets a list of everything we can find in the classpath and makes sure it has
-   * a name, description, etc...
-   */
-  public void testCallMBeanInfo() throws Exception {
-    List<Class> classes = new ArrayList<>();
-    classes.addAll(getClassesForPackage(StandardRequestHandler.class.getPackage().getName()));
-    classes.addAll(getClassesForPackage(SearchComponent.class.getPackage().getName()));
-    classes.addAll(getClassesForPackage(LukeRequestHandler.class.getPackage().getName()));
-    classes.addAll(getClassesForPackage(DefaultSolrHighlighter.class.getPackage().getName()));
-    classes.addAll(getClassesForPackage(LRUCache.class.getPackage().getName()));
-   // System.out.println(classes);
-    
-    int checked = 0;
-    for( Class clazz : classes ) {
-      if( SolrInfoMBean.class.isAssignableFrom( clazz ) ) {
-        try {
-          SolrInfoMBean info = (SolrInfoMBean)clazz.newInstance();
-          
-          //System.out.println( info.getClass() );
-          assertNotNull( info.getName() );
-          assertNotNull( info.getDescription() );
-          assertNotNull( info.getCategory() );
-          
-          if( info instanceof LRUCache ) {
-            continue;
-          }
-          
-          assertNotNull( info.toString() );
-          // increase code coverage...
-          assertNotNull( info.getDocs() + "" );
-          assertNotNull( info.getStatistics()+"" );
-          checked++;
-        }
-        catch( InstantiationException ex ) {
-          // expected...
-          //System.out.println( "unable to initialize: "+clazz );
-        }
-      }
-    }
-    assertTrue( "there are at least 10 SolrInfoMBean that should be found in the classpath, found " + checked, checked > 10 );
-  }
-  
-  private static List<Class> getClassesForPackage(String pckgname) throws Exception {
-    ArrayList<File> directories = new ArrayList<>();
-    ClassLoader cld = h.getCore().getResourceLoader().getClassLoader();
-    String path = pckgname.replace('.', '/');
-    Enumeration<URL> resources = cld.getResources(path);
-    while (resources.hasMoreElements()) {
-      final URI uri = resources.nextElement().toURI();
-      if (!"file".equalsIgnoreCase(uri.getScheme()))
-        continue;
-      final File f = new File(uri);
-      directories.add(f);
-    }
-      
-    ArrayList<Class> classes = new ArrayList<>();
-    for (File directory : directories) {
-      if (directory.exists()) {
-        String[] files = directory.list();
-        for (String file : files) {
-          if (file.endsWith(".class")) {
-             String clazzName = file.substring(0, file.length() - 6);
-             // exclude Test classes that happen to be in these packages.
-             // class.ForName'ing some of them can cause trouble.
-             if (!clazzName.endsWith("Test") && !clazzName.startsWith("Test")) {
-               classes.add(Class.forName(pckgname + '.' + clazzName));
-             }
-          }
-        }
-      }
-    }
-    assertFalse("No classes found in package '"+pckgname+"'; maybe your test classes are packaged as JAR file?", classes.isEmpty());
-    return classes;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/TestGroupingSearch.java b/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
index e659727..2d46551 100644
--- a/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
+++ b/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
@@ -838,7 +838,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 {
         Object realResponse = ObjectBuilder.fromJSON(strResponse);
         String err = JSONTestUtil.matchObj("/grouped/" + groupField, realResponse, modelResponse);
         if (err != null) {
-          log.error("GROUPING MISMATCH: " + err
+          log.error("GROUPING MISMATCH (" + queryIter + "): " + err
            + "\n\trequest="+req
            + "\n\tresult="+strResponse
            + "\n\texpected="+ JSONUtil.toJSON(modelResponse)
@@ -854,7 +854,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 {
         // assert post / pre grouping facets
         err = JSONTestUtil.matchObj("/facet_counts/facet_fields/"+FOO_STRING_FIELD, realResponse, expectedFacetResponse);
         if (err != null) {
-          log.error("GROUPING MISMATCH: " + err
+          log.error("GROUPING MISMATCH (" + queryIter + "): " + err
            + "\n\trequest="+req
            + "\n\tresult="+strResponse
            + "\n\texpected="+ JSONUtil.toJSON(expectedFacetResponse)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
index 6ca072b..869650df 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
@@ -57,6 +57,10 @@ public class AliasIntegrationTest extends SolrCloudTestCase {
 
     CollectionAdminRequest.createAlias("testalias", "collection1").process(cluster.getSolrClient());
 
+    // ensure that the alias has been registered
+    assertEquals("collection1",
+        new CollectionAdminRequest.ListAliases().process(cluster.getSolrClient()).getAliases().get("testalias"));
+
     // search for alias
     QueryResponse res = cluster.getSolrClient().query("testalias", new SolrQuery("*:*"));
     assertEquals(3, res.getResults().getNumFound());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index d1dbe9c..1c23c9c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -59,6 +59,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
+import java.net.URL;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -752,19 +753,28 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 
   private Long getNumCommits(HttpSolrClient sourceClient) throws
       SolrServerException, IOException {
-    try (HttpSolrClient client = getHttpSolrClient(sourceClient.getBaseURL())) {
+    // construct the /admin/metrics URL
+    URL url = new URL(sourceClient.getBaseURL());
+    String path = url.getPath().substring(1);
+    String[] elements = path.split("/");
+    String collection = elements[elements.length - 1];
+    String urlString = url.toString();
+    urlString = urlString.substring(0, urlString.length() - collection.length() - 1);
+    try (HttpSolrClient client = getHttpSolrClient(urlString)) {
       client.setConnectionTimeout(15000);
       client.setSoTimeout(60000);
       ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("qt", "/admin/mbeans?key=updateHandler&stats=true");
+      //params.set("qt", "/admin/metrics?prefix=UPDATE.updateHandler&registry=solr.core." + collection);
+      params.set("qt", "/admin/metrics");
+      params.set("prefix", "UPDATE.updateHandler");
+      params.set("registry", "solr.core." + collection);
       // use generic request to avoid extra processing of queries
       QueryRequest req = new QueryRequest(params);
       NamedList<Object> resp = client.request(req);
-      NamedList mbeans = (NamedList) resp.get("solr-mbeans");
-      NamedList uhandlerCat = (NamedList) mbeans.get("UPDATE");
-      NamedList uhandler = (NamedList) uhandlerCat.get("updateHandler");
-      NamedList stats = (NamedList) uhandler.get("stats");
-      return (Long) stats.get("commits");
+      NamedList metrics = (NamedList) resp.get("metrics");
+      NamedList uhandlerCat = (NamedList) metrics.getVal(0);
+      Map<String,Object> commits = (Map<String,Object>) uhandlerCat.get("UPDATE.updateHandler.commits");
+      return (Long) commits.get("count");
     }
   }
 

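The rewritten getNumCommits() above doubles as the general recipe for reading a single metric over HTTP: hit the node-level /admin/metrics handler, narrow the response with a registry name ("solr.core.<collection>") and a prefix, then unwrap the named map. A condensed sketch under the same assumptions (client and collection names are illustrative):

    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("qt", "/admin/metrics");
    params.set("prefix", "UPDATE.updateHandler");
    params.set("registry", "solr.core.collection1"); // illustrative collection name
    NamedList<Object> resp = client.request(new QueryRequest(params));
    NamedList metrics = (NamedList) resp.get("metrics"); // one entry per matched registry
    NamedList registry = (NamedList) metrics.getVal(0);
    Map<String, Object> commits = (Map<String, Object>) registry.get("UPDATE.updateHandler.commits");
    long count = (Long) commits.get("count"); // the "commits" entry is a map with a "count" key
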
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/BasicZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicZkTest.java
index 26fa325..f48f76b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicZkTest.java
@@ -16,11 +16,14 @@
  */
 package org.apache.solr.cloud;
 
+import java.util.Map;
+
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Metric;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
@@ -158,11 +161,11 @@ public class BasicZkTest extends AbstractZkTestCase {
     }
     
     // test stats call
-    NamedList stats = core.getStatistics();
-    assertEquals("collection1", stats.get("coreName"));
-    assertEquals("collection1", stats.get("collection"));
-    assertEquals("shard1", stats.get("shard"));
-    assertTrue(stats.get("refCount") != null);
+    Map<String, Metric> metrics = h.getCore().getCoreMetricManager().getRegistry().getMetrics();
+    assertEquals("collection1", ((Gauge)metrics.get("CORE.coreName")).getValue());
+    assertEquals("collection1", ((Gauge)metrics.get("CORE.collection")).getValue());
+    assertEquals("shard1", ((Gauge)metrics.get("CORE.shard")).getValue());
+    assertTrue(metrics.get("CORE.refCount") != null);
 
     //zkController.getZkClient().printLayoutToStdOut();
   }

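This is the counterpart to the MetricsMap lookup earlier in this diff: scalar core properties are published as plain com.codahale.metrics Gauge instances, read with getValue(). A two-line sketch under the same harness assumptions (the String cast matches what the assertions above expect):

    Map<String, Metric> metrics = h.getCore().getCoreMetricManager().getRegistry().getMetrics();
    String coreName = (String) ((Gauge) metrics.get("CORE.coreName")).getValue();
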
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/ClusterStateMockUtil.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ClusterStateMockUtil.java b/solr/core/src/test/org/apache/solr/cloud/ClusterStateMockUtil.java
new file mode 100644
index 0000000..e0cf3f7
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/ClusterStateMockUtil.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud;
+
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.Utils;
+
+public class ClusterStateMockUtil {
+
+  private final static Pattern BLUEPRINT = Pattern.compile("([a-z])(\\d+)?(?:(['A','R','D','F']))?(\\*)?");
+
+  protected static class Result implements Closeable {
+    OverseerAutoReplicaFailoverThread.DownReplica badReplica;
+    ZkStateReader reader;
+
+    @Override
+    public void close() throws IOException {
+      reader.close();
+    }
+  }
+
+  protected static ClusterStateMockUtil.Result buildClusterState(List<Result> results, String string, String ... liveNodes) {
+    return buildClusterState(results, string, 1, liveNodes);
+  }
+
+  protected static ClusterStateMockUtil.Result buildClusterState(List<Result> results, String string, int replicationFactor, String ... liveNodes) {
+    return buildClusterState(results, string, replicationFactor, 10, liveNodes);
+  }
+
+  /**
+   * This method lets you construct a complex ClusterState object by using simple strings of letters.
+   *
+   * c = collection, s = slice, r = replica, \d = node number (r2 means the replica is on node 2),
+   * state = [R,D,F] (recovering, down, recovery failed; omitted means active), * = replica to replace, binds to the left.
+   *
+   * For example:
+   * csrr2rD*sr2csr
+   *
+   * Creates:
+   *
+   * 'csrr2rD*'
+   * A collection, a shard, a replica on node 1 (the default) that is active (the default), a replica on node 2, and a replica on node 1
+   * that has a state of down and is the replica we will be looking to put somewhere else (the *).
+   *
+   * 'sr2'
+   * Then, another shard that has a replica on node 2.
+   *
+   * 'csr'
+   * Then, another collection that has a shard with a single active replica on node 1.
+   *
+   * Result:
+   *        {
+   *         "collection2":{
+   *           "maxShardsPerNode":"1",
+   *           "replicationFactor":"1",
+   *           "shards":{"slice1":{
+   *               "state":"active",
+   *               "replicas":{"replica5":{
+   *                   "state":"active",
+   *                   "node_name":"baseUrl1_",
+   *                   "base_url":"http://baseUrl1"}}}}},
+   *         "collection1":{
+   *           "maxShardsPerNode":"1",
+   *           "replicationFactor":"1",
+   *           "shards":{
+   *             "slice1":{
+   *               "state":"active",
+   *               "replicas":{
+   *                 "replica3 (bad)":{
+   *                   "state":"down",
+   *                   "node_name":"baseUrl1_",
+   *                   "base_url":"http://baseUrl1"},
+   *                 "replica2":{
+   *                   "state":"active",
+   *                   "node_name":"baseUrl2_",
+   *                   "base_url":"http://baseUrl2"},
+   *                 "replica1":{
+   *                   "state":"active",
+   *                   "node_name":"baseUrl1_",
+   *                   "base_url":"http://baseUrl1"}}},
+   *             "slice2":{
+   *               "state":"active",
+   *               "replicas":{"replica4":{
+   *                   "state":"active",
+   *                   "node_name":"baseUrl2_",
+   *                   "base_url":"http://baseUrl2"}}}}}}
+   *
+   */
+  @SuppressWarnings("resource")
+  protected static ClusterStateMockUtil.Result buildClusterState(List<Result> results, String clusterDescription, int replicationFactor, int maxShardsPerNode, String ... liveNodes) {
+    ClusterStateMockUtil.Result result = new ClusterStateMockUtil.Result();
+
+    Map<String,Slice> slices = null;
+    Map<String,Replica> replicas = null;
+    Map<String,Object> collectionProps = new HashMap<>();
+    collectionProps.put(ZkStateReader.MAX_SHARDS_PER_NODE, Integer.toString(maxShardsPerNode));
+    collectionProps.put(ZkStateReader.REPLICATION_FACTOR, Integer.toString(replicationFactor));
+    Map<String,DocCollection> collectionStates = new HashMap<>();
+    DocCollection docCollection = null;
+    Slice slice = null;
+    int replicaCount = 1;
+
+    Matcher m = BLUEPRINT.matcher(clusterDescription);
+    while (m.find()) {
+      Replica replica;
+      switch (m.group(1)) {
+        case "c":
+          slices = new HashMap<>();
+          docCollection = new DocCollection("collection" + (collectionStates.size() + 1), slices, collectionProps, null);
+          collectionStates.put(docCollection.getName(), docCollection);
+          break;
+        case "s":
+          replicas = new HashMap<>();
+          slice = new Slice("slice" + (slices.size() + 1), replicas, null);
+          slices.put(slice.getName(), slice);
+          break;
+        case "r":
+          Map<String,Object> replicaPropMap = new HashMap<>();
+          String node;
+
+          node = m.group(2);
+
+          if (node == null || node.trim().length() == 0) {
+            node = "1";
+          }
+
+          Replica.State state = Replica.State.ACTIVE;
+          String stateCode = m.group(3);
+
+          if (stateCode != null) {
+            switch (stateCode.charAt(0)) {
+              case 'S':
+                state = Replica.State.ACTIVE;
+                break;
+              case 'R':
+                state = Replica.State.RECOVERING;
+                break;
+              case 'D':
+                state = Replica.State.DOWN;
+                break;
+              case 'F':
+                state = Replica.State.RECOVERY_FAILED;
+                break;
+              default:
+                throw new IllegalArgumentException(
+                    "Unexpected state for replica: " + stateCode);
+            }
+          }
+
+          String nodeName = "baseUrl" + node + "_";
+          String replicaName = "replica" + replicaCount++;
+
+          if ("*".equals(m.group(4))) {
+            replicaName += " (bad)";
+          }
+
+          replicaPropMap.put(ZkStateReader.NODE_NAME_PROP, nodeName);
+          replicaPropMap.put(ZkStateReader.BASE_URL_PROP, "http://baseUrl" + node);
+          replicaPropMap.put(ZkStateReader.STATE_PROP, state.toString());
+
+          replica = new Replica(replicaName, replicaPropMap);
+
+          if ("*".equals(m.group(4))) {
+            result.badReplica = new OverseerAutoReplicaFailoverThread.DownReplica();
+            result.badReplica.replica = replica;
+            result.badReplica.slice = slice;
+            result.badReplica.collection = docCollection;
+          }
+
+          replicas.put(replica.getName(), replica);
+          break;
+        default:
+          break;
+      }
+    }
+
+    ClusterState clusterState = new ClusterState(1, new HashSet<>(Arrays.asList(liveNodes)), collectionStates);
+    MockZkStateReader reader = new MockZkStateReader(clusterState, collectionStates.keySet());
+
+    String json;
+    try {
+      json = new String(Utils.toJSON(clusterState), "UTF-8");
+    } catch (UnsupportedEncodingException e) {
+      throw new RuntimeException("Unexpected");
+    }
+    System.err.println(json);
+
+    // todo remove the limitation of always having a bad replica
+    assert result.badReplica != null : "no replica was marked bad with '*'";
+    assert result.badReplica.slice != null : "the bad replica was not attached to a slice";
+
+    result.reader = reader;
+
+    if (results != null) {
+      results.add(result);
+    }
+
+    return result;
+  }
+
+
+}

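A usage sketch of the blueprint grammar documented above, mirroring the call NodeMutatorTest (below) makes; node names follow the "baseUrlN_" convention the builder itself generates, and Result is closed because it owns a mock ZkStateReader:

    // collection1: a slice with replicas on node1 and node2 plus a down replica
    // flagged '*'; collection2: one slice with a single replica on node2.
    try (ClusterStateMockUtil.Result result =
             ClusterStateMockUtil.buildClusterState(null, "csrr2rD*csr2", 1, 1, "baseUrl1_", "baseUrl2_")) {
      ClusterState clusterState = result.reader.getClusterState();
      // result.badReplica identifies the replica marked with '*'
    }
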
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
index dcb115a..30c3c9e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
@@ -29,6 +29,7 @@ import org.apache.solr.client.solrj.request.CollectionAdminRequest.SplitShard;
 import org.apache.solr.client.solrj.response.RequestStatusState;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -178,11 +179,22 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
       //expected
     }
 
-    String replica = shard1.getReplicas().iterator().next().getName();
+    Replica replica = shard1.getReplicas().iterator().next();
+    for (String liveNode : client.getZkStateReader().getClusterState().getLiveNodes()) {
+      if (!replica.getNodeName().equals(liveNode)) {
+        state = new CollectionAdminRequest.MoveReplica(collection, replica.getName(), liveNode)
+            .processAndWait(client, MAX_TIMEOUT_SECONDS);
+        assertSame("MoveReplica did not complete", RequestStatusState.COMPLETED, state);
+        break;
+      }
+    }
+
+    shard1 = client.getZkStateReader().getClusterState().getSlice(collection, "shard1");
+    String replicaName = shard1.getReplicas().iterator().next().getName();
     state = new CollectionAdminRequest.DeleteReplica()
         .setCollectionName(collection)
         .setShardName("shard1")
-        .setReplica(replica)
+        .setReplica(replicaName)
         .processAndWait(client, MAX_TIMEOUT_SECONDS);
     assertSame("DeleteReplica did not complete", RequestStatusState.COMPLETED, state);
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
index 7925358..ed9ed41 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
@@ -20,6 +20,7 @@ import javax.management.MBeanServer;
 import javax.management.MBeanServerFactory;
 import javax.management.ObjectName;
 import java.io.IOException;
+import java.lang.invoke.MethodHandles;
 import java.lang.management.ManagementFactory;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -37,6 +38,7 @@ import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.collect.ImmutableList;
+import org.apache.commons.io.IOUtils;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.client.solrj.SolrClient;
@@ -68,12 +70,14 @@ import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrInfoMBean.Category;
+import org.apache.solr.core.SolrInfoBean.Category;
 import org.apache.solr.util.TestInjection;
 import org.apache.solr.util.TimeOut;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
@@ -83,6 +87,7 @@ import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
  */
 @Slow
 public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   @BeforeClass
   public static void beforeCollectionsAPIDistributedZkTest() {
@@ -94,9 +99,11 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
 
   @BeforeClass
   public static void setupCluster() throws Exception {
+    String solrXml = IOUtils.toString(CollectionsAPIDistributedZkTest.class.getResourceAsStream("/solr/solr-jmxreporter.xml"), "UTF-8");
     configureCluster(4)
         .addConfig("conf", configset("cloud-minimal"))
         .addConfig("conf2", configset("cloud-minimal-jmx"))
+        .withSolrXml(solrXml)
         .configure();
   }
 
@@ -549,7 +556,7 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
     for (SolrCore core : theCores) {
 
       // look for core props file
-      Path instancedir = (Path) core.getStatistics().get("instanceDir");
+      Path instancedir = (Path) core.getResourceLoader().getInstancePath();
       assertTrue("Could not find expected core.properties file", Files.exists(instancedir.resolve("core.properties")));
 
       Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve(core.getName());
@@ -620,25 +627,22 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
       Set<ObjectName> mbeans = new HashSet<>();
       mbeans.addAll(server.queryNames(null, null));
       for (final ObjectName mbean : mbeans) {
-        Object value;
-        Object indexDir;
-        Object name;
 
         try {
-          if (((value = server.getAttribute(mbean, "category")) != null && value
-              .toString().equals(Category.CORE.toString()))
-              && ((indexDir = server.getAttribute(mbean, "coreName")) != null)
-              && ((indexDir = server.getAttribute(mbean, "indexDir")) != null)
-              && ((name = server.getAttribute(mbean, "name")) != null)) {
-            if (!indexDirToShardNamesMap.containsKey(indexDir.toString())) {
-              indexDirToShardNamesMap.put(indexDir.toString(),
-                  new HashSet<String>());
+          Map<String, String> props = mbean.getKeyPropertyList();
+          String category = props.get("category");
+          String name = props.get("name");
+          if ((category != null && category.toString().equals(Category.CORE.toString())) &&
+              (name != null && name.equals("indexDir"))) {
+            String indexDir = server.getAttribute(mbean, "Value").toString();
+            String key = props.get("dom2") + "." + props.get("dom3") + "." + props.get("dom4");
+            if (!indexDirToShardNamesMap.containsKey(indexDir)) {
+              indexDirToShardNamesMap.put(indexDir.toString(), new HashSet<>());
             }
-            indexDirToShardNamesMap.get(indexDir.toString()).add(
-                name.toString());
+            indexDirToShardNamesMap.get(indexDir.toString()).add(key);
           }
         } catch (Exception e) {
-          // ignore, just continue - probably a "category" or "source" attribute
+          // ignore, just continue - probably a "Value" attribute
           // not found
         }
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java b/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java
index b6754c7..d2d6a16 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java
@@ -113,13 +113,15 @@ public class DistributedQueueTest extends SolrTestCaseJ4 {
 
     // After draining the queue, a watcher should be set.
     assertNull(dq.peek(100));
-    assertTrue(dq.hasWatcher());
+    assertFalse(dq.isDirty());
+    assertEquals(1, dq.watcherCount());
 
     forceSessionExpire();
 
     // Session expiry should have fired the watcher.
     Thread.sleep(100);
-    assertFalse(dq.hasWatcher());
+    assertTrue(dq.isDirty());
+    assertEquals(0, dq.watcherCount());
 
     // Rerun the earlier test make sure updates are still seen, post reconnection.
     future = executor.submit(() -> new String(dq.peek(true), UTF8));
@@ -138,6 +140,50 @@ public class DistributedQueueTest extends SolrTestCaseJ4 {
   }
 
   @Test
+  public void testLeakChildWatcher() throws Exception {
+    String dqZNode = "/distqueue/test";
+    DistributedQueue dq = makeDistributedQueue(dqZNode);
+    assertTrue(dq.peekElements(1, 1, s1 -> true).isEmpty());
+    assertEquals(1, dq.watcherCount());
+    assertFalse(dq.isDirty());
+    assertTrue(dq.peekElements(1, 1, s1 -> true).isEmpty());
+    assertEquals(1, dq.watcherCount());
+    assertFalse(dq.isDirty());
+    assertNull(dq.peek());
+    assertEquals(1, dq.watcherCount());
+    assertFalse(dq.isDirty());
+    assertNull(dq.peek(10));
+    assertEquals(1, dq.watcherCount());
+    assertFalse(dq.isDirty());
+
+    dq.offer("hello world".getBytes(UTF8));
+    assertNotNull(dq.peek()); // synchronously available
+    // dirty and watcher state indeterminate here, race with watcher
+    Thread.sleep(100); // watcher should have fired now
+    assertNotNull(dq.peek());
+    assertEquals(1, dq.watcherCount());
+    assertFalse(dq.isDirty());
+    assertFalse(dq.peekElements(1, 1, s -> true).isEmpty());
+    assertEquals(1, dq.watcherCount());
+    assertFalse(dq.isDirty());
+  }
+
+  @Test
+  public void testLocallyOffer() throws Exception {
+    String dqZNode = "/distqueue/test";
+    DistributedQueue dq = makeDistributedQueue(dqZNode);
+    dq.peekElements(1, 1, s -> true);
+    for (int i = 0; i < 100; i++) {
+      byte[] data = String.valueOf(i).getBytes(UTF8);
+      dq.offer(data);
+      assertNotNull(dq.peek());
+      dq.poll();
+      dq.peekElements(1, 1, s -> true);
+    }
+  }
+
+
+  @Test
   public void testPeekElements() throws Exception {
     String dqZNode = "/distqueue/test";
     byte[] data = "hello world".getBytes(UTF8);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/MoveReplicaTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/MoveReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/MoveReplicaTest.java
new file mode 100644
index 0000000..4368fea
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/MoveReplicaTest.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MoveReplicaTest extends SolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(4)
+        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-dynamic").resolve("conf"))
+        .configure();
+  }
+
+  protected String getSolrXml() {
+    return "solr.xml";
+  }
+
+  @Test
+  public void test() throws Exception {
+    cluster.waitForAllNodes(5000);
+    String coll = "movereplicatest_coll";
+    log.info("total_jettys: " + cluster.getJettySolrRunners().size());
+
+    CloudSolrClient cloudClient = cluster.getSolrClient();
+
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf1", 2, 2);
+    create.setMaxShardsPerNode(2);
+    cloudClient.request(create);
+
+    Replica replica = getRandomReplica(coll, cloudClient);
+    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+    ArrayList<String> l = new ArrayList<>(liveNodes);
+    Collections.shuffle(l, random());
+    String targetNode = null;
+    for (String node : l) { // iterate the shuffled copy so the target node is randomized
+      if (!replica.getNodeName().equals(node)) {
+        targetNode = node;
+        break;
+      }
+    }
+    assertNotNull(targetNode);
+    String shardId = null;
+    for (Slice slice : cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices()) {
+      if (slice.getReplicas().contains(replica)) {
+        shardId = slice.getName();
+      }
+    }
+
+    CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
+    moveReplica.processAsync("000", cloudClient);
+    CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus("000");
+    // wait for async request success
+    boolean success = false;
+    for (int i = 0; i < 200; i++) {
+      CollectionAdminRequest.RequestStatusResponse rsp = requestStatus.process(cloudClient);
+      if (rsp.getRequestStatus() == RequestStatusState.COMPLETED) {
+        success = true;
+        break;
+      }
+      assertFalse(rsp.getRequestStatus() == RequestStatusState.FAILED);
+      Thread.sleep(50);
+    }
+    assertTrue(success);
+    checkNumOfCores(cloudClient, replica.getNodeName(), 0);
+    checkNumOfCores(cloudClient, targetNode, 2);
+
+    moveReplica = new CollectionAdminRequest.MoveReplica(coll, shardId, targetNode, replica.getNodeName());
+    moveReplica.process(cloudClient);
+    checkNumOfCores(cloudClient, replica.getNodeName(), 1);
+    checkNumOfCores(cloudClient, targetNode, 1);
+  }
+
+  private Replica getRandomReplica(String coll, CloudSolrClient cloudClient) {
+    List<Replica> replicas = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getReplicas();
+    Collections.shuffle(replicas, random());
+    return replicas.get(0);
+  }
+
+  private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, int expectedCores) throws IOException, SolrServerException {
+    assertEquals(nodeName + " does not have expected number of cores", expectedCores, getNumOfCores(cloudClient, nodeName));
+  }
+
+  private int getNumOfCores(CloudSolrClient cloudClient, String nodeName) throws IOException, SolrServerException {
+    try (HttpSolrClient coreclient = getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName))) {
+      CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
+      return status.getCoreStatus().size();
+    }
+  }
+}

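The polling loop in the test above is the general SolrJ pattern for any asynchronous collection API call: submit with processAsync() and a caller-chosen request id, then poll requestStatus() until a terminal state. Condensed, reusing the id "000" from the test (loop bounds are illustrative):

    moveReplica.processAsync("000", cloudClient); // returns immediately
    CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus("000");
    for (int i = 0; i < 200; i++) { // ~10s at 50ms per poll
      RequestStatusState state = requestStatus.process(cloudClient).getRequestStatus();
      if (state == RequestStatusState.COMPLETED) break;
      if (state == RequestStatusState.FAILED) fail("MoveReplica failed");
      Thread.sleep(50);
    }
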
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/NodeMutatorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/NodeMutatorTest.java b/solr/core/src/test/org/apache/solr/cloud/NodeMutatorTest.java
new file mode 100644
index 0000000..ffa6ba2
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/NodeMutatorTest.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.solr.SolrTestCaseJ4Test;
+import org.apache.solr.cloud.overseer.NodeMutator;
+import org.apache.solr.cloud.overseer.ZkWriteCommand;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.junit.Test;
+
+public class NodeMutatorTest extends SolrTestCaseJ4Test {
+
+  private static final String NODE3 = "baseUrl3_";
+  private static final String NODE3_URL = "http://baseUrl3";
+
+  private static final String NODE2 = "baseUrl2_";
+  private static final String NODE2_URL = "http://baseUrl2";
+
+  private static final String NODE1 = "baseUrl1_";
+  private static final String NODE1_URL = "http://baseUrl1";
+
+  @Test
+  public void downNodeReportsAllImpactedCollectionsAndNothingElse() throws IOException {
+    NodeMutator nm = new NodeMutator();
+    ZkNodeProps props = new ZkNodeProps(ZkStateReader.NODE_NAME_PROP, NODE1);
+
+    //We use 2 nodes with maxShardsPerNode as 1
+    //Collection1: 2 shards X 1 replica = replica1 on node1 and replica2 on node2
+    //Collection2: 1 shard X 1 replica = replica1 on node2
+    ClusterStateMockUtil.Result result = ClusterStateMockUtil.buildClusterState(null, "csrr2rD*csr2", 1, 1, NODE1, NODE2);
+    ClusterState clusterState = result.reader.getClusterState();
+    assertEquals(clusterState.getCollection("collection1").getReplica("replica1").getBaseUrl(), NODE1_URL);
+    assertEquals(clusterState.getCollection("collection1").getReplica("replica2").getBaseUrl(), NODE2_URL);
+    assertEquals(clusterState.getCollection("collection2").getReplica("replica4").getBaseUrl(), NODE2_URL);
+
+    props = new ZkNodeProps(ZkStateReader.NODE_NAME_PROP, NODE1);
+    List<ZkWriteCommand> writes = nm.downNode(clusterState, props);
+    assertEquals(writes.size(), 1);
+    assertEquals(writes.get(0).name, "collection1");
+    assertEquals(writes.get(0).collection.getReplica("replica1").getState(), Replica.State.DOWN);
+    assertEquals(writes.get(0).collection.getReplica("replica2").getState(), Replica.State.ACTIVE);
+    result.close();
+
+    //We use 3 nodes with maxShardsPerNode as 1
+    //Collection1: 2 shards X 1 replica = replica1 on node1 and replica2 on node2
+    //Collection2: 1 shard X 1 replica = replica1 on node2
+    //Collection3: 1 shard X 3 replica = replica1 on node1 , replica2 on node2, replica3 on node3
+    result = ClusterStateMockUtil.buildClusterState(null, "csrr2rD*csr2csr1r2r3", 1, 1, NODE1, NODE2, NODE3);
+    clusterState = result.reader.getClusterState();
+    assertEquals(clusterState.getCollection("collection1").getReplica("replica1").getBaseUrl(), NODE1_URL);
+    assertEquals(clusterState.getCollection("collection1").getReplica("replica2").getBaseUrl(), NODE2_URL);
+
+    assertEquals(clusterState.getCollection("collection2").getReplica("replica4").getBaseUrl(), NODE2_URL);
+
+    assertEquals(clusterState.getCollection("collection3").getReplica("replica5").getBaseUrl(), NODE1_URL);
+    assertEquals(clusterState.getCollection("collection3").getReplica("replica6").getBaseUrl(), NODE2_URL);
+    assertEquals(clusterState.getCollection("collection3").getReplica("replica7").getBaseUrl(), NODE3_URL);
+
+    writes = nm.downNode(clusterState, props);
+    assertEquals(writes.size(), 2);
+    for (ZkWriteCommand write : writes) {
+      if (write.name.equals("collection1")) {
+        assertEquals(write.collection.getReplica("replica1").getState(), Replica.State.DOWN);
+        assertEquals(write.collection.getReplica("replica2").getState(), Replica.State.ACTIVE);
+      } else if (write.name.equals("collection3")) {
+        assertEquals(write.collection.getReplica("replica5").getState(), Replica.State.DOWN);
+        assertEquals(write.collection.getReplica("replica6").getState(), Replica.State.ACTIVE);
+        assertEquals(write.collection.getReplica("replica7").getState(), Replica.State.ACTIVE);
+      } else {
+        fail("No other collection needs to be changed");
+      }
+    }
+    result.close();
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java b/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
index 9441e3f..9100eee 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
@@ -18,7 +18,6 @@ package org.apache.solr.cloud;
 
 import java.io.File;
 import java.lang.invoke.MethodHandles;
-import java.net.ServerSocket;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Locale;
@@ -71,14 +70,6 @@ public class ReplicationFactorTest extends AbstractFullDistribZkTestBase {
     return createProxiedJetty(solrHome, dataDir, shardList, solrConfigOverride, schemaOverride);
   }
   
-  protected int getNextAvailablePort() throws Exception {    
-    int port = -1;
-    try (ServerSocket s = new ServerSocket(0)) {
-      port = s.getLocalPort();
-    }
-    return port;
-  }
-
   @Test
   public void test() throws Exception {
     log.info("replication factor test running");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverUtilsTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverUtilsTest.java b/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverUtilsTest.java
index f5fee21..3423420 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverUtilsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverUtilsTest.java
@@ -16,30 +16,16 @@
  */
 package org.apache.solr.cloud;
 
-import java.io.Closeable;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 
 import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.cloud.OverseerAutoReplicaFailoverThread.DownReplica;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.Utils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import static org.apache.solr.cloud.ClusterStateMockUtil.buildClusterState;
+
 public class SharedFSAutoReplicaFailoverUtilsTest extends SolrTestCaseJ4 {
   private static final String NODE6 = "baseUrl6_";
   private static final String NODE6_URL = "http://baseUrl6";
@@ -58,12 +44,8 @@ public class SharedFSAutoReplicaFailoverUtilsTest extends SolrTestCaseJ4 {
 
   private static final String NODE1 = "baseUrl1_";
   private static final String NODE1_URL = "http://baseUrl1";
-
-  private final static Pattern BLUEPRINT = Pattern.compile("([a-z])(\\d+)?(?:(['A','R','D','F']))?(\\*)?");
-
-  private int buildNumber = 1;
   
-  private List<Result> results;
+  private List<ClusterStateMockUtil.Result> results;
   
   @Before
   public void setUp() throws Exception {
@@ -74,61 +56,50 @@ public class SharedFSAutoReplicaFailoverUtilsTest extends SolrTestCaseJ4 {
   @After
   public void tearDown() throws Exception {
     super.tearDown();
-    for (Result result : results) {
+    for (ClusterStateMockUtil.Result result : results) {
       result.close();
     }
   }
   
   @Test
   public void testGetBestCreateUrlBasics() {
-    Result result = buildClusterState("csr1R*r2", NODE1);
+    ClusterStateMockUtil.Result result = buildClusterState(results, "csr1R*r2", NODE1);
     String createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertNull("Should be no live node to failover to", createUrl);
     
-    result = buildClusterState("csr1R*r2", NODE1, NODE2);
+    result = buildClusterState(results, "csr1R*r2", NODE1, NODE2);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertNull("Only failover candidate node already has a replica", createUrl);
     
-    result = buildClusterState("csr1R*r2sr3", NODE1, NODE2, NODE3);
+    result = buildClusterState(results, "csr1R*r2sr3", NODE1, NODE2, NODE3);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertEquals("Node3 does not have a replica from the bad slice and should be the best choice", NODE3_URL, createUrl);
 
-    result = buildClusterState("csr1R*r2Fsr3r4r5", NODE1, NODE2, NODE3);
+    result = buildClusterState(results, "csr1R*r2Fsr3r4r5", NODE1, NODE2, NODE3);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertTrue(createUrl.equals(NODE3_URL));
 
-    result = buildClusterState("csr1*r2r3sr3r3sr4", NODE1, NODE2, NODE3, NODE4);
+    result = buildClusterState(results, "csr1*r2r3sr3r3sr4", NODE1, NODE2, NODE3, NODE4);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertEquals(NODE4_URL, createUrl);
     
-    result = buildClusterState("csr1*r2sr3r3sr4sr4", NODE1, NODE2, NODE3, NODE4);
+    result = buildClusterState(results, "csr1*r2sr3r3sr4sr4", NODE1, NODE2, NODE3, NODE4);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertTrue(createUrl.equals(NODE3_URL) || createUrl.equals(NODE4_URL));
   }
-  
-  
-  private static class Result implements Closeable {
-    DownReplica badReplica;
-    ZkStateReader reader;
-    
-    @Override
-    public void close() throws IOException {
-      reader.close();
-    }
-  }
 
   @Test
   public void testGetBestCreateUrlMultipleCollections() throws Exception {
 
-    Result result = buildClusterState("csr*r2csr2", NODE1);
+    ClusterStateMockUtil.Result result = buildClusterState(results, "csr*r2csr2", NODE1);
     String createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertNull(createUrl);
 
-    result = buildClusterState("csr*r2csr2", NODE1);
+    result = buildClusterState(results, "csr*r2csr2", NODE1);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertNull(createUrl);
 
-    result = buildClusterState("csr*r2csr2", NODE1, NODE2);
+    result = buildClusterState(results, "csr*r2csr2", NODE1, NODE2);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertNull(createUrl);
   }
@@ -136,11 +107,11 @@ public class SharedFSAutoReplicaFailoverUtilsTest extends SolrTestCaseJ4 {
   @Test
   public void testGetBestCreateUrlMultipleCollections2() {
     
-    Result result = buildClusterState("csr*r2sr3cr2", NODE1);
+    ClusterStateMockUtil.Result result = buildClusterState(results, "csr*r2sr3cr2", NODE1);
     String createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertNull(createUrl);
 
-    result = buildClusterState("csr*r2sr3cr2", NODE1, NODE2, NODE3);
+    result = buildClusterState(results, "csr*r2sr3cr2", NODE1, NODE2, NODE3);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertEquals(NODE3_URL, createUrl);
   }
@@ -148,253 +119,73 @@ public class SharedFSAutoReplicaFailoverUtilsTest extends SolrTestCaseJ4 {
   
   @Test
   public void testGetBestCreateUrlMultipleCollections3() {
-    Result result = buildClusterState("csr5r1sr4r2sr3r6csr2*r6sr5r3sr4r3", NODE1, NODE4, NODE5, NODE6);
+    ClusterStateMockUtil.Result result = buildClusterState(results, "csr5r1sr4r2sr3r6csr2*r6sr5r3sr4r3", NODE1, NODE4, NODE5, NODE6);
     String createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertEquals(NODE1_URL, createUrl);
   }
   
   @Test
   public void testGetBestCreateUrlMultipleCollections4() {
-    Result result = buildClusterState("csr1r4sr3r5sr2r6csr5r6sr4r6sr5*r4", NODE6);
+    ClusterStateMockUtil.Result result = buildClusterState(results, "csr1r4sr3r5sr2r6csr5r6sr4r6sr5*r4", NODE6);
     String createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertEquals(NODE6_URL, createUrl);
   }
   
   @Test
   public void testFailOverToEmptySolrInstance() {
-    Result result = buildClusterState("csr1*r1sr1csr1", NODE2);
+    ClusterStateMockUtil.Result result = buildClusterState(results, "csr1*r1sr1csr1", NODE2);
     String createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertEquals(NODE2_URL, createUrl);
   }
   
   @Test
   public void testFavorForeignSlices() {
-    Result result = buildClusterState("csr*sr2csr3r3", NODE2, NODE3);
+    ClusterStateMockUtil.Result result = buildClusterState(results, "csr*sr2csr3r3", NODE2, NODE3);
     String createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertEquals(NODE3_URL, createUrl);
     
-    result = buildClusterState("csr*sr2csr3r3r3r3r3r3r3", NODE2, NODE3);
+    result = buildClusterState(results, "csr*sr2csr3r3r3r3r3r3r3", NODE2, NODE3);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertEquals(NODE2_URL, createUrl);
   }
 
   @Test
   public void testCollectionMaxNodesPerShard() {
-    Result result = buildClusterState("csr*sr2", 1, 1, NODE2);
+    ClusterStateMockUtil.Result result = buildClusterState(results, "csr*sr2", 1, 1, NODE2);
     String createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertNull(createUrl);
 
-    result = buildClusterState("csr*sr2", 1, 2, NODE2);
+    result = buildClusterState(results, "csr*sr2", 1, 2, NODE2);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertEquals(NODE2_URL, createUrl);
 
-    result = buildClusterState("csr*csr2r2", 1, 1, NODE2);
+    result = buildClusterState(results, "csr*csr2r2", 1, 1, NODE2);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, null);
     assertEquals(NODE2_URL, createUrl);
   }
 
   @Test
   public void testMaxCoresPerNode() {
-    Result result = buildClusterState("csr*sr2", 1, 1, NODE2);
+    ClusterStateMockUtil.Result result = buildClusterState(results, "csr*sr2", 1, 1, NODE2);
     String createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, 1);
     assertNull(createUrl);
 
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, 2);
     assertNull(createUrl);
 
-    result = buildClusterState("csr*sr2", 1, 2, NODE2);
+    result = buildClusterState(results, "csr*sr2", 1, 2, NODE2);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, 2);
     assertEquals(NODE2_URL, createUrl);
 
-    result = buildClusterState("csr*sr2sr3sr4", 1, 1, NODE2, NODE3, NODE4);
+    result = buildClusterState(results, "csr*sr2sr3sr4", 1, 1, NODE2, NODE3, NODE4);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, 1);
     assertNull(createUrl);
 
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, 2);
     assertNull(createUrl);
 
-    result = buildClusterState("csr*sr2sr3sr4", 1, 2, NODE2, NODE3, NODE4);
+    result = buildClusterState(results, "csr*sr2sr3sr4", 1, 2, NODE2, NODE3, NODE4);
     createUrl = OverseerAutoReplicaFailoverThread.getBestCreateUrl(result.reader, result.badReplica, 2);
     assertTrue(createUrl.equals(NODE3_URL) || createUrl.equals(NODE4_URL));
   }
-  
-  private Result buildClusterState(String string, String ... liveNodes) {
-    return buildClusterState(string, 1, liveNodes);
-  }
-  
-  private Result buildClusterState(String string, int replicationFactor, String ... liveNodes) {
-    return buildClusterState(string, replicationFactor, 10, liveNodes);
-  }
-  
-  /**
-   * This method lets you construct a complex ClusterState object by using simple strings of letters.
-   * 
-   * c = collection, s = slice, r = replica, \d = node number (r2 means the replica is on node 2), 
-   * state = [A,R,D,F], * = replica to replace, binds to the left.
-   * 
-   * For example:
-   * csrr2rD*sr2csr
-   * 
-   * Creates:
-   * 
-   * 'csrr2rD*'
-   * A collection, a shard, a replica on node 1 (the default) that is active (the default), a replica on node 2, and a replica on node 1
-   * that has a state of down and is the replica we will be looking to put somewhere else (the *).
-   * 
-   * 'sr2'
-   * Then, another shard that has a replica on node 2.
-   * 
-   * 'csr'
-   * Then, another collection that has a shard with a single active replica on node 1.
-   * 
-   * Result:
-   *        {
-   *         "collection2":{
-   *           "maxShardsPerNode":"1",
-   *           "replicationFactor":"1",
-   *           "shards":{"slice1":{
-   *               "state":"active",
-   *               "replicas":{"replica5":{
-   *                   "state":"active",
-   *                   "node_name":"baseUrl1_",
-   *                   "base_url":"http://baseUrl1"}}}}},
-   *         "collection1":{
-   *           "maxShardsPerNode":"1",
-   *           "replicationFactor":"1",
-   *           "shards":{
-   *             "slice1":{
-   *               "state":"active",
-   *               "replicas":{
-   *                 "replica3 (bad)":{
-   *                   "state":"down",
-   *                   "node_name":"baseUrl1_",
-   *                   "base_url":"http://baseUrl1"},
-   *                 "replica2":{
-   *                   "state":"active",
-   *                   "node_name":"baseUrl2_",
-   *                   "base_url":"http://baseUrl2"},
-   *                 "replica1":{
-   *                   "state":"active",
-   *                   "node_name":"baseUrl1_",
-   *                   "base_url":"http://baseUrl1"}}},
-   *             "slice2":{
-   *               "state":"active",
-   *               "replicas":{"replica4":{
-   *                   "state":"active",
-   *                   "node_name":"baseUrl2_",
-   *                   "base_url":"http://baseUrl2"}}}}}}
-   * 
-   */
-  @SuppressWarnings("resource")
-  private Result buildClusterState(String clusterDescription, int replicationFactor, int maxShardsPerNode, String ... liveNodes) {
-    Result result = new Result();
-    
-    Map<String,Slice> slices = null;
-    Map<String,Replica> replicas = null;
-    Map<String,Object> collectionProps = new HashMap<>();
-    collectionProps.put(ZkStateReader.MAX_SHARDS_PER_NODE, Integer.toString(maxShardsPerNode));
-    collectionProps.put(ZkStateReader.REPLICATION_FACTOR, Integer.toString(replicationFactor));
-    Map<String,DocCollection> collectionStates = new HashMap<>();
-    DocCollection docCollection = null;
-    Slice slice = null;
-    int replicaCount = 1;
-    
-    Matcher m = BLUEPRINT.matcher(clusterDescription);
-    while (m.find()) {
-      Replica replica;
-      switch (m.group(1)) {
-        case "c":
-          slices = new HashMap<>();
-          docCollection = new DocCollection("collection" + (collectionStates.size() + 1), slices, collectionProps, null);
-          collectionStates.put(docCollection.getName(), docCollection);
-          break;
-        case "s":
-          replicas = new HashMap<>();
-          slice = new Slice("slice" + (slices.size() + 1), replicas, null);
-          slices.put(slice.getName(), slice);
-          break;
-        case "r":
-          Map<String,Object> replicaPropMap = new HashMap<>();
-          String node;
-
-          node = m.group(2);
-          
-          if (node == null || node.trim().length() == 0) {
-            node = "1";
-          }
-          
-          Replica.State state = Replica.State.ACTIVE;
-          String stateCode = m.group(3);
-
-          if (stateCode != null) {
-            switch (stateCode.charAt(0)) {
-              case 'S':
-                state = Replica.State.ACTIVE;
-                break;
-              case 'R':
-                state = Replica.State.RECOVERING;
-                break;
-              case 'D':
-                state = Replica.State.DOWN;
-                break;
-              case 'F':
-                state = Replica.State.RECOVERY_FAILED;
-                break;
-              default:
-                throw new IllegalArgumentException(
-                    "Unexpected state for replica: " + stateCode);
-            }
-          }
-          
-          String nodeName = "baseUrl" + node + "_";
-          String replicaName = "replica" + replicaCount++;
-          
-          if ("*".equals(m.group(4))) {
-            replicaName += " (bad)";
-          }
-          
-          replicaPropMap.put(ZkStateReader.NODE_NAME_PROP, nodeName);
-          replicaPropMap.put(ZkStateReader.BASE_URL_PROP, "http://baseUrl" + node);
-          replicaPropMap.put(ZkStateReader.STATE_PROP, state.toString());
-          
-          replica = new Replica(replicaName, replicaPropMap);
-          
-          if ("*".equals(m.group(4))) {
-            result.badReplica = new DownReplica();
-            result.badReplica.replica = replica;
-            result.badReplica.slice = slice;
-            result.badReplica.collection = docCollection;
-          }
-          
-          replicas.put(replica.getName(), replica);
-          break;
-        default:
-          break;
-      }
-    }
-  
-    // trunk briefly had clusterstate taking a zkreader :( this was required to work around that - leaving
-    // until that issue is resolved.
-    MockZkStateReader reader = new MockZkStateReader(null, collectionStates.keySet());
-    ClusterState clusterState = new ClusterState(1, new HashSet<>(Arrays.asList(liveNodes)), collectionStates);
-    reader = new MockZkStateReader(clusterState, collectionStates.keySet());
-    
-    String json;
-    try {
-      json = new String(Utils.toJSON(clusterState), "UTF-8");
-    } catch (UnsupportedEncodingException e) {
-      throw new RuntimeException("Unexpected");
-    }
-    System.err.println("build:" + buildNumber++);
-    System.err.println(json);
-    
-    assert result.badReplica != null : "Is there no bad replica?";
-    assert result.badReplica.slice != null : "Is there no bad replica?";
-    
-    result.reader = reader;
-    
-    results.add(result);
-
-    return result;
-  }
 }

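With the blueprint DSL and its javadoc moved out to ClusterStateMockUtil, a minimal sketch of the refactored call pattern used throughout this test (assuming the utility keeps the grammar documented above: c = collection, s = slice, r = replica, a digit for the node, R/D/F for replica state, * marks the replica to replace):

    // Results are collected so tearDown() can close every ZkStateReader.
    List<ClusterStateMockUtil.Result> results = new ArrayList<>();

    // "csr1R*r2": one collection, one slice, a recovering replica on
    // node 1 flagged (*) as the one to move, plus a replica on node 2.
    ClusterStateMockUtil.Result result = buildClusterState(results, "csr1R*r2", NODE1, NODE2);
    String createUrl = OverseerAutoReplicaFailoverThread
        .getBestCreateUrl(result.reader, result.badReplica, null);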
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
index 1c1c5c1..d7b9d8a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
@@ -23,6 +23,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.codahale.metrics.Counter;
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.BaseDistributedSearchTestCase;
 import org.apache.solr.SolrTestCaseJ4;
@@ -39,6 +40,7 @@ import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.SolrCore;
+import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.request.SolrRequestHandler;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -109,10 +111,12 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
     Map<String, Integer> shardVsCount = new HashMap<>();
     for (JettySolrRunner runner : jettys) {
       CoreContainer container = runner.getCoreContainer();
+      SolrMetricManager metricManager = container.getMetricManager();
       for (SolrCore core : container.getCores()) {
+        String registry = core.getCoreMetricManager().getRegistryName();
+        Counter cnt = metricManager.counter(null, registry, "requests", "QUERY.standard");
         SolrRequestHandler select = core.getRequestHandler("");
-        long c = (long) select.getStatistics().get("requests");
-        shardVsCount.put(core.getName(), (int) c);
+        shardVsCount.put(core.getName(), (int) cnt.getCount());
       }
     }
 
@@ -190,6 +195,10 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
       }
       assertNotNull(leaderCore);
 
+      SolrMetricManager leaderMetricManager = leaderCore.getCoreContainer().getMetricManager();
+      String leaderRegistry = leaderCore.getCoreMetricManager().getRegistryName();
+      Counter cnt = leaderMetricManager.counter(null, leaderRegistry, "requests", "QUERY.standard");
+
       // All queries should be served by the active replica
       // To make sure that's true we keep querying the down replica
       // If queries are getting processed by the down replica then the cluster state hasn't updated for that replica
@@ -200,8 +209,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
         count++;
         client.query(new SolrQuery("*:*"));
 
-        SolrRequestHandler select = leaderCore.getRequestHandler("");
-        long c = (long) select.getStatistics().get("requests");
+        long c = cnt.getCount();
 
         if (c == 1) {
           break; // cluster state has been updated locally
@@ -222,8 +230,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
         client.query(new SolrQuery("*:*"));
         count++;
 
-        SolrRequestHandler select = leaderCore.getRequestHandler("");
-        long c = (long) select.getStatistics().get("requests");
+        long c = cnt.getCount();
 
         assertEquals("Query wasn't served by leader", count, c);
       }

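The hunks above replace the per-handler SolrInfoMBean statistics with Dropwizard counters resolved through SolrMetricManager. Condensed, the new read path looks like this (the "QUERY.standard"/"requests" names are taken from the diff; shardVsCount is the tally map from the first hunk):

    SolrMetricManager metricManager = container.getMetricManager();
    for (SolrCore core : container.getCores()) {
      // Each core registers under its own registry name (solr.core.*).
      String registry = core.getCoreMetricManager().getRegistryName();
      // Cumulative count of requests served by the standard query handler;
      // counter() is a get-or-create lookup in the Dropwizard registry.
      Counter requests = metricManager.counter(null, registry, "requests", "QUERY.standard");
      shardVsCount.put(core.getName(), (int) requests.getCount());
    }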
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
index 1b830ad..58d499b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
@@ -16,15 +16,37 @@
  */
 package org.apache.solr.cloud.hdfs;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
 import com.carrotsearch.randomizedtesting.annotations.Nightly;
 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Metric;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.CoreStatus;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
 import org.apache.solr.cloud.CollectionsAPIDistributedZkTest;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.util.BadHdfsThreadsFilter;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Test;
 
 @Slow
 @Nightly
@@ -59,4 +81,96 @@ public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistribut
     System.clearProperty("solr.hdfs.home");
   }
 
+  @Test
+  public void moveReplicaTest() throws Exception {
+    cluster.waitForAllNodes(5000);
+    String coll = "movereplicatest_coll";
+
+    CloudSolrClient cloudClient = cluster.getSolrClient();
+
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
+    create.setMaxShardsPerNode(2);
+    cloudClient.request(create);
+
+    for (int i = 0; i < 10; i++) {
+      cloudClient.add(coll, sdoc("id",String.valueOf(i)));
+      cloudClient.commit(coll);
+    }
+
+    List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
+    Collections.shuffle(slices, random());
+    Slice slice = null;
+    Replica replica = null;
+    for (Slice s : slices) {
+      slice = s;
+      for (Replica r : s.getReplicas()) {
+        if (s.getLeader() != r) {
+          replica = r;
+        }
+      }
+    }
+    String dataDir = getDataDir(replica);
+
+    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+    ArrayList<String> l = new ArrayList<>(liveNodes);
+    Collections.shuffle(l, random());
+    String targetNode = null;
+    for (String node : l) { // iterate the shuffled copy, not the raw set
+      if (!replica.getNodeName().equals(node)) {
+        targetNode = node;
+        break;
+      }
+    }
+    assertNotNull(targetNode);
+
+    CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
+    moveReplica.process(cloudClient);
+
+    checkNumOfCores(cloudClient, replica.getNodeName(), 0);
+    checkNumOfCores(cloudClient, targetNode, 2);
+
+    waitForState("Timed out waiting for recovery to finish", coll, clusterShape(2, 2));
+    slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
+    boolean found = false;
+    for (Replica newReplica : slice.getReplicas()) {
+      if (getDataDir(newReplica).equals(dataDir)) {
+        found = true;
+      }
+    }
+    assertTrue(found);
+
+
+    // data dir is reused so replication will be skipped
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
+      SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
+      List<String> registryNames = manager.registryNames().stream()
+          .filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
+      for (String registry : registryNames) {
+        Map<String, Metric> metrics = manager.registry(registry).getMetrics();
+        Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
+        if (counter != null) {
+          assertEquals(0, counter.getCount());
+        }
+      }
+    }
+  }
+
+
+  private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, int expectedCores) throws IOException, SolrServerException {
+    assertEquals(nodeName + " does not have the expected number of cores", expectedCores, getNumOfCores(cloudClient, nodeName));
+  }
+
+  private int getNumOfCores(CloudSolrClient cloudClient, String nodeName) throws IOException, SolrServerException {
+    try (HttpSolrClient coreclient = getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName))) {
+      CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
+      return status.getCoreStatus().size();
+    }
+  }
+
+  private String getDataDir(Replica replica) throws IOException, SolrServerException {
+    try (HttpSolrClient coreclient = getHttpSolrClient(replica.getBaseUrl())) {
+      CoreStatus status = CoreAdminRequest.getCoreStatus(replica.getCoreName(), coreclient);
+      return status.getDataDirectory();
+    }
+  }
 }

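One behavior moveReplicaTest relies on deserves a note: because MoveReplica points the new core at the existing HDFS data directory, the replica recovers from the index in place and never issues a replication request, which is why a zero REPLICATION./replication.requests count can be asserted across every solr.core.* registry. Stripped to its essentials (metric and registry names as in the hunk above):

    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
      SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
      for (String registry : manager.registryNames()) {
        if (!registry.startsWith("solr.core.")) continue;  // core registries only
        Counter counter = (Counter) manager.registry(registry)
            .getMetrics().get("REPLICATION./replication.requests");
        // The counter can be absent if the handler never registered one.
        if (counter != null) {
          assertEquals(0, counter.getCount());
        }
      }
    }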
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d8df9f8c/solr/core/src/test/org/apache/solr/core/ExitableDirectoryReaderTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/core/ExitableDirectoryReaderTest.java b/solr/core/src/test/org/apache/solr/core/ExitableDirectoryReaderTest.java
index 5f0d537..aa42664 100644
--- a/solr/core/src/test/org/apache/solr/core/ExitableDirectoryReaderTest.java
+++ b/solr/core/src/test/org/apache/solr/core/ExitableDirectoryReaderTest.java
@@ -19,7 +19,7 @@ package org.apache.solr.core;
 import java.util.Map;
 
 import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.common.util.NamedList;
+import org.apache.solr.metrics.MetricsMap;
 import org.apache.solr.response.SolrQueryResponse;
 import org.junit.BeforeClass;
 import org.junit.Test;
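
The hunks that follow apply the commit's common migration: cache statistics move from SolrInfoMBean.getStatistics() to MetricsMap gauges in the per-core registry, keyed as CACHE.searcher.<cacheName>. The common access pattern, as a standalone sketch:

    MetricsMap filterCacheStats = (MetricsMap) h.getCore().getCoreMetricManager()
        .getRegistry().getMetrics().get("CACHE.searcher.filterCache");
    Map<String, Object> stats = filterCacheStats.getValue();  // fresh snapshot
    long hits = (long) stats.get("hits");
    long inserts = (long) stats.get("inserts");
    // getValue() rebuilds the snapshot on each call, so re-read it after
    // the queries whose cache effects are being asserted on.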
@@ -88,11 +88,11 @@ public class ExitableDirectoryReaderTest extends SolrTestCaseJ4 {
   public void testCacheAssumptions() throws Exception {
     String fq = "name:d*";
     SolrCore core = h.getCore();
-    SolrInfoMBean filterCacheStats = core.getInfoRegistry().get("filterCache");
-    long fqInserts = (long) filterCacheStats.getStatistics().get("inserts");
+    MetricsMap filterCacheStats = (MetricsMap)core.getCoreMetricManager().getRegistry().getMetrics().get("CACHE.searcher.filterCache");
+    long fqInserts = (long) filterCacheStats.getValue().get("inserts");
 
-    SolrInfoMBean queryCacheStats = core.getInfoRegistry().get("queryResultCache");
-    long qrInserts = (long) queryCacheStats.getStatistics().get("inserts");
+    MetricsMap queryCacheStats = (MetricsMap)core.getCoreMetricManager().getRegistry().getMetrics().get("CACHE.searcher.queryResultCache");
+    long qrInserts = (long) queryCacheStats.getValue().get("inserts");
 
     // This query gets 0 docs back; with timeAllowed=10000 instead of 1 it
     // gets 100 back and the for loop below succeeds.
@@ -105,16 +105,16 @@ public class ExitableDirectoryReaderTest extends SolrTestCaseJ4 {
     assertTrue("Should have partial results", (Boolean) (header.get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY)));
 
     assertEquals("Should NOT have inserted partial results in the cache!",
-        (long) queryCacheStats.getStatistics().get("inserts"), qrInserts);
+        (long) queryCacheStats.getValue().get("inserts"), qrInserts);
 
-    assertEquals("Should NOT have another insert", fqInserts, (long) filterCacheStats.getStatistics().get("inserts"));
+    assertEquals("Should NOT have another insert", fqInserts, (long) filterCacheStats.getValue().get("inserts"));
 
     // At the end of all this, we should have no hits in the queryResultCache.
     response = JQ(req("q", "*:*", "fq", fq, "indent", "true", "timeAllowed", longTimeout));
 
     // Check that we did insert this one.
-    assertEquals("Hits should still be 0", (long) filterCacheStats.getStatistics().get("hits"), 0L);
-    assertEquals("Inserts should be bumped", (long) filterCacheStats.getStatistics().get("inserts"), fqInserts + 1);
+    assertEquals("Hits should still be 0", (long) filterCacheStats.getValue().get("hits"), 0L);
+    assertEquals("Inserts should be bumped", (long) filterCacheStats.getValue().get("inserts"), fqInserts + 1);
 
     res = (Map) ObjectBuilder.fromJSON(response);
     body = (Map) (res.get("response"));
@@ -130,14 +130,14 @@ public class ExitableDirectoryReaderTest extends SolrTestCaseJ4 {
   public void testQueryResults() throws Exception {
     String q = "name:e*";
     SolrCore core = h.getCore();
-    SolrInfoMBean queryCacheStats = core.getInfoRegistry().get("queryResultCache");
-    NamedList nl = queryCacheStats.getStatistics();
+    MetricsMap queryCacheStats = (MetricsMap)core.getCoreMetricManager().getRegistry().getMetrics().get("CACHE.searcher.queryResultCache");
+    Map<String,Object> nl = queryCacheStats.getValue();
     long inserts = (long) nl.get("inserts");
 
     String response = JQ(req("q", q, "indent", "true", "timeAllowed", "1", "sleep", sleep));
 
     // The queryResultCache should NOT get an entry here.
-    nl = queryCacheStats.getStatistics();
+    nl = queryCacheStats.getValue();
     assertEquals("Should NOT have inserted partial results!", inserts, (long) nl.get("inserts"));
 
     Map res = (Map) ObjectBuilder.fromJSON(response);
@@ -150,7 +150,7 @@ public class ExitableDirectoryReaderTest extends SolrTestCaseJ4 {
     response = JQ(req("q", q, "indent", "true", "timeAllowed", longTimeout));
 
     // Check that we did insert this one.
-    NamedList nl2 = queryCacheStats.getStatistics();
+    Map<String,Object> nl2 = queryCacheStats.getValue();
     assertEquals("Hits should still be 0", (long) nl.get("hits"), (long) nl2.get("hits"));
     assertTrue("Inserts should be bumped", inserts < (long) nl2.get("inserts"));