Posted to commits@lucene.apache.org by sh...@apache.org on 2017/06/25 02:06:26 UTC

[04/47] lucene-solr:feature/autoscaling: SOLR-8256: Set legacyCloud=false as default
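
With legacyCloud=false as the new default, collections are no longer created implicitly when cores register themselves in ZooKeeper, so the tests touched in this patch create their collections and place replicas explicitly through the Collections API. A minimal sketch of the pattern these diffs use (collection, shard, and client names are simply the ones that appear below):

    assertEquals(0, CollectionAdminRequest
        .createCollection("collection1", 1, 1)      // numShards=1, replicationFactor=1
        .setCreateNodeSet("")                        // create the collection with no replicas yet
        .process(cloudClient).getStatus());
    assertTrue(CollectionAdminRequest
        .addReplicaToShard("collection1", "shard1")
        .setNode(jetty.getNodeName())                // place the replica on a specific node
        .process(cloudClient).isSuccess());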

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
index 2d327a2..f6abb54 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
@@ -29,7 +29,6 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -47,13 +46,11 @@ import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.CloudConfig;
 import org.apache.solr.handler.component.HttpShardHandlerFactory;
 import org.apache.solr.update.UpdateShardHandler;
 import org.apache.solr.update.UpdateShardHandlerConfig;
-import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.NoNodeException;
@@ -131,8 +128,20 @@ public class OverseerTest extends SolrTestCaseJ4 {
       zkStateReader.close();
       zkClient.close();
     }
-    
-    public String publishState(String collection, String coreName, String coreNodeName, Replica.State stateName, int numShards)
+
+    public void createCollection(String collection, int numShards) throws KeeperException, InterruptedException {
+
+      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
+          "name", collection,
+          ZkStateReader.REPLICATION_FACTOR, "1",
+          ZkStateReader.NUM_SHARDS_PROP, numShards+"",
+          "createNodeSet", "");
+      DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
+      q.offer(Utils.toJSON(m));
+
+    }
+
+    public String publishState(String collection, String coreName, String coreNodeName, String shard, Replica.State stateName, int numShards)
         throws KeeperException, InterruptedException, IOException {
       if (stateName == null) {
         ElectionContext ec = electionContext.remove(coreName);
@@ -144,22 +153,23 @@ public class OverseerTest extends SolrTestCaseJ4 {
             ZkStateReader.CORE_NAME_PROP, coreName,
             ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName,
             ZkStateReader.COLLECTION_PROP, collection);
-            DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
-            q.offer(Utils.toJSON(m));
-         return null;
+        DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
+        q.offer(Utils.toJSON(m));
+        return null;
       } else {
         ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
-        ZkStateReader.STATE_PROP, stateName.toString(),
-        ZkStateReader.NODE_NAME_PROP, nodeName,
-        ZkStateReader.CORE_NAME_PROP, coreName,
-        ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName,
-        ZkStateReader.COLLECTION_PROP, collection,
-        ZkStateReader.NUM_SHARDS_PROP, Integer.toString(numShards),
-        ZkStateReader.BASE_URL_PROP, "http://" + nodeName + "/solr/");
+            ZkStateReader.STATE_PROP, stateName.toString(),
+            ZkStateReader.NODE_NAME_PROP, nodeName,
+            ZkStateReader.CORE_NAME_PROP, coreName,
+            ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName,
+            ZkStateReader.COLLECTION_PROP, collection,
+            ZkStateReader.SHARD_ID_PROP, shard,
+            ZkStateReader.NUM_SHARDS_PROP, Integer.toString(numShards),
+            ZkStateReader.BASE_URL_PROP, "http://" + nodeName + "/solr/");
         DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
         q.offer(Utils.toJSON(m));
       }
-      
+
       if (collection.length() > 0) {
         for (int i = 0; i < 120; i++) {
           String shardId = getShardId(collection, coreNodeName);
@@ -193,7 +203,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       }
       return null;
     }
-    
+
     private String getShardId(String collection, String coreNodeName) {
       Map<String,Slice> slices = zkStateReader.getClusterState().getSlicesMap(collection);
       if (slices != null) {
@@ -257,7 +267,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       server.run();
       AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
       AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
-      
+
       zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
       ZkController.createClusterZkNodes(zkClient);
 
@@ -265,24 +275,32 @@ public class OverseerTest extends SolrTestCaseJ4 {
 
       ZkStateReader reader = new ZkStateReader(zkClient);
       reader.createClusterStateWatchersAndUpdate();
-      
+
       zkController = new MockZKController(server.getZkAddress(), "127.0.0.1");
 
       final int numShards=6;
-      
+
+      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
+          "name", COLLECTION,
+          ZkStateReader.REPLICATION_FACTOR, "1",
+          ZkStateReader.NUM_SHARDS_PROP, "3",
+          "createNodeSet", "");
+      DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
+      q.offer(Utils.toJSON(m));
+
       for (int i = 0; i < numShards; i++) {
-        assertNotNull("shard got no id?", zkController.publishState(COLLECTION, "core" + (i+1), "node" + (i+1), Replica.State.ACTIVE, 3));
+        assertNotNull("shard got no id?", zkController.publishState(COLLECTION, "core" + (i+1), "node" + (i+1), "shard"+((i%3)+1), Replica.State.ACTIVE, 3));
       }
       final Map<String,Replica> rmap = reader.getClusterState().getSlice(COLLECTION, "shard1").getReplicasMap();
       assertEquals(rmap.toString(), 2, rmap.size());
       assertEquals(rmap.toString(), 2, reader.getClusterState().getSlice(COLLECTION, "shard2").getReplicasMap().size());
       assertEquals(rmap.toString(), 2, reader.getClusterState().getSlice(COLLECTION, "shard3").getReplicasMap().size());
-      
+
       //make sure leaders are in cloud state
       assertNotNull(reader.getLeaderUrl(COLLECTION, "shard1", 15000));
       assertNotNull(reader.getLeaderUrl(COLLECTION, "shard2", 15000));
       assertNotNull(reader.getLeaderUrl(COLLECTION, "shard3", 15000));
-      
+
     } finally {
       close(zkClient);
       if (zkController != null) {
@@ -319,9 +337,10 @@ public class OverseerTest extends SolrTestCaseJ4 {
       zkController = new MockZKController(server.getZkAddress(), "127.0.0.1");
 
       final int numShards=3;
-      
+      zkController.createCollection(COLLECTION, 3);
       for (int i = 0; i < numShards; i++) {
-        assertNotNull("shard got no id?", zkController.publishState(COLLECTION, "core" + (i+1), "node" + (i+1), Replica.State.ACTIVE, 3));
+        assertNotNull("shard got no id?", zkController.publishState(COLLECTION, "core" + (i+1),
+            "node" + (i+1), "shard"+((i%3)+1) , Replica.State.ACTIVE, 3));
       }
 
       assertEquals(1, reader.getClusterState().getSlice(COLLECTION, "shard1").getReplicasMap().size());
@@ -335,12 +354,14 @@ public class OverseerTest extends SolrTestCaseJ4 {
       
       // publish a bad queue item
       String emptyCollectionName = "";
-      zkController.publishState(emptyCollectionName, "core0", "node0", Replica.State.ACTIVE, 1);
-      zkController.publishState(emptyCollectionName, "core0", "node0", null, 1);
-      
+      zkController.publishState(emptyCollectionName, "core0", "node0", "shard1",  Replica.State.ACTIVE, 1);
+      zkController.publishState(emptyCollectionName, "core0", "node0", "shard1", null, 1);
+
+      zkController.createCollection("collection2", 3);
       // make sure the Overseer is still processing items
       for (int i = 0; i < numShards; i++) {
-        assertNotNull("shard got no id?", zkController.publishState("collection2", "core" + (i + 1), "node" + (i + 1), Replica.State.ACTIVE, 3));
+        assertNotNull("shard got no id?", zkController.publishState("collection2",
+            "core" + (i + 1), "node" + (i + 1),"shard"+((i%3)+1), Replica.State.ACTIVE, 3));
       }
 
       assertEquals(1, reader.getClusterState().getSlice("collection2", "shard1").getReplicasMap().size());
@@ -361,147 +382,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
       server.shutdown();
     }
   }
-  
-  @Test
-  public void testShardAssignmentBigger() throws Exception {
-    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
-
-    final int nodeCount = random().nextInt(TEST_NIGHTLY ? 50 : 10)+(TEST_NIGHTLY ? 50 : 10)+1;   //how many simulated nodes (num of threads)
-    final int coreCount = random().nextInt(TEST_NIGHTLY ? 100 : 11)+(TEST_NIGHTLY ? 100 : 11)+1; //how many cores to register
-    final int sliceCount = random().nextInt(TEST_NIGHTLY ? 20 : 5)+1;  //how many slices
-    
-    ZkTestServer server = new ZkTestServer(zkDir);
-
-    SolrZkClient zkClient = null;
-    ZkStateReader reader = null;
-    SolrZkClient overseerClient = null;
-
-    final MockZKController[] controllers = new MockZKController[nodeCount];
-    final ExecutorService[] nodeExecutors = new ExecutorService[nodeCount];
-    try {
-      server.run();
-      AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
-      AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
-
-      zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
-      ZkController.createClusterZkNodes(zkClient);
-      
-      overseerClient = electNewOverseer(server.getZkAddress());
-
-      reader = new ZkStateReader(zkClient);
-      reader.createClusterStateWatchersAndUpdate();
-
-      for (int i = 0; i < nodeCount; i++) {
-        controllers[i] = new MockZKController(server.getZkAddress(), "node" + i);
-      }      
-      for (int i = 0; i < nodeCount; i++) {
-        nodeExecutors[i] = ExecutorUtil.newMDCAwareFixedThreadPool(1, new DefaultSolrThreadFactory("testShardAssignment"));
-      }
-      
-      final String[] ids = new String[coreCount];
-      //register total of coreCount cores
-      for (int i = 0; i < coreCount; i++) {
-        final int slot = i;
-
-        nodeExecutors[i % nodeCount].submit((Runnable) () -> {
-
-          final String coreName = "core" + slot;
-
-          try {
-            ids[slot] = controllers[slot % nodeCount].publishState(COLLECTION, coreName, "node" + slot, Replica.State.ACTIVE, sliceCount);
-          } catch (Throwable e) {
-            e.printStackTrace();
-            fail("register threw exception:" + e.getClass());
-          }
-        });
-      }
-      
-      for (int i = 0; i < nodeCount; i++) {
-        nodeExecutors[i].shutdown();
-      }
-
-      for (int i = 0; i < nodeCount; i++) {
-        while (!nodeExecutors[i].awaitTermination(100, TimeUnit.MILLISECONDS));
-      }
-      
-      // make sure all cores have been assigned a id in cloudstate
-      int cloudStateSliceCount = 0;
-      for (int i = 0; i < 40; i++) {
-        cloudStateSliceCount = 0;
-        ClusterState state = reader.getClusterState();
-        final Map<String,Slice> slices = state.getSlicesMap(COLLECTION);
-        if (slices != null) {
-          for (String name : slices.keySet()) {
-            cloudStateSliceCount += slices.get(name).getReplicasMap().size();
-          }
-          if (coreCount == cloudStateSliceCount) break;
-        }
-
-        Thread.sleep(200);
-      }
-      assertEquals("Unable to verify all cores have been assigned an id in cloudstate",
-                   coreCount, cloudStateSliceCount);
-
-      // make sure all cores have been returned an id
-      int assignedCount = 0;
-      for (int i = 0; i < 240; i++) {
-        assignedCount = 0;
-        for (int j = 0; j < coreCount; j++) {
-          if (ids[j] != null) {
-            assignedCount++;
-          }
-        }
-        if (coreCount == assignedCount) {
-          break;
-        }
-        Thread.sleep(1000);
-      }
-      assertEquals("Unable to verify all cores have been returned an id", 
-                   coreCount, assignedCount);
-      
-      final HashMap<String, AtomicInteger> counters = new HashMap<>();
-      for (int i = 1; i < sliceCount+1; i++) {
-        counters.put("shard" + i, new AtomicInteger());
-      }
-      
-      for (int i = 0; i < coreCount; i++) {
-        final AtomicInteger ai = counters.get(ids[i]);
-        assertNotNull("could not find counter for shard:" + ids[i], ai);
-        ai.incrementAndGet();
-      }
-
-      for (String counter: counters.keySet()) {
-        int count = counters.get(counter).intValue();
-        int expectedCount = coreCount / sliceCount;
-        int min = expectedCount - 1;
-        int max = expectedCount + 1;
-        if (count < min || count > max) {
-          fail("Unevenly assigned shard ids, " + counter + " had " + count
-              + ", expected: " + min + "-" + max);
-        }
-      }
-      
-      //make sure leaders are in cloud state
-      for (int i = 0; i < sliceCount; i++) {
-        assertNotNull(reader.getLeaderUrl(COLLECTION, "shard" + (i + 1), 15000));
-      }
-
-    } finally {
-      close(zkClient);
-      close(overseerClient);
-      close(reader);
-      for (int i = 0; i < controllers.length; i++)
-        if (controllers[i] != null) {
-          controllers[i].close();
-        }
-      server.shutdown();
-      for (int i = 0; i < nodeCount; i++) {
-        if (nodeExecutors[i] != null) {
-          nodeExecutors[i].shutdownNow();
-        }
-      }
-    }
-  }
 
   //wait until collections are available
   private void waitForCollections(ZkStateReader stateReader, String... collections) throws InterruptedException, KeeperException {
@@ -545,11 +425,19 @@ public class OverseerTest extends SolrTestCaseJ4 {
       overseerClient = electNewOverseer(server.getZkAddress());
 
       DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
-      
-      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
+
+      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
+          "name", COLLECTION,
+          ZkStateReader.REPLICATION_FACTOR, "1",
+          ZkStateReader.NUM_SHARDS_PROP, "1",
+          "createNodeSet", "");
+      q.offer(Utils.toJSON(m));
+
+      m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
           ZkStateReader.NODE_NAME_PROP, "node1",
           ZkStateReader.COLLECTION_PROP, COLLECTION,
+          ZkStateReader.SHARD_ID_PROP, "shard1",
           ZkStateReader.CORE_NAME_PROP, "core1",
           ZkStateReader.ROLES_PROP, "",
           ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
@@ -557,15 +445,14 @@ public class OverseerTest extends SolrTestCaseJ4 {
       q.offer(Utils.toJSON(m));
       
       waitForCollections(reader, COLLECTION);
-
-      assertSame(reader.getClusterState().toString(), Replica.State.RECOVERING,
-          reader.getClusterState().getSlice(COLLECTION, "shard1").getReplica("core_node1").getState());
+      verifyReplicaStatus(reader, "collection1", "shard1", "core_node1", Replica.State.RECOVERING);
 
       //publish node state (active)
       m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
           ZkStateReader.NODE_NAME_PROP, "node1",
           ZkStateReader.COLLECTION_PROP, COLLECTION,
+          ZkStateReader.SHARD_ID_PROP, "shard1",
           ZkStateReader.CORE_NAME_PROP, "core1",
           ZkStateReader.ROLES_PROP, "",
           ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString());
@@ -634,7 +521,8 @@ public class OverseerTest extends SolrTestCaseJ4 {
       overseerClient = electNewOverseer(server.getZkAddress());
       
       Thread.sleep(1000);
-      mockController.publishState(COLLECTION, core, core_node,
+      mockController.createCollection(COLLECTION, 1);
+      mockController.publishState(COLLECTION, core, core_node, "shard1",
           Replica.State.RECOVERING, numShards);
       
       waitForCollections(reader, COLLECTION);
@@ -642,7 +530,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       
       int version = getClusterStateVersion(zkClient);
       
-      mockController.publishState(COLLECTION, core, core_node, Replica.State.ACTIVE,
+      mockController.publishState(COLLECTION, core, core_node, "shard1", Replica.State.ACTIVE,
           numShards);
       
       while (version == getClusterStateVersion(zkClient));
@@ -652,7 +540,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       overseerClient.close();
       Thread.sleep(1000); // wait for overseer to get killed
       
-      mockController.publishState(COLLECTION, core, core_node,
+      mockController.publishState(COLLECTION, core, core_node, "shard1",
           Replica.State.RECOVERING, numShards);
       version = getClusterStateVersion(zkClient);
       
@@ -667,7 +555,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       assertEquals(shard+" replica count does not match", 1, reader.getClusterState()
           .getSlice(COLLECTION, shard).getReplicasMap().size());
       version = getClusterStateVersion(zkClient);
-      mockController.publishState(COLLECTION, core, core_node, null, numShards);
+      mockController.publishState(COLLECTION, core, core_node, "shard1", null, numShards);
       while (version == getClusterStateVersion(zkClient));
       Thread.sleep(500);
       assertTrue(COLLECTION +" should remain after removal of the last core", // as of SOLR-5209 core removal does not cascade to remove the slice and collection
@@ -723,7 +611,8 @@ public class OverseerTest extends SolrTestCaseJ4 {
       overseerElector.setup(ec);
       overseerElector.joinElection(ec, false);
 
-      mockController.publishState(COLLECTION, "core1", "core_node1", Replica.State.ACTIVE, 1);
+      mockController.createCollection(COLLECTION, 1);
+      mockController.publishState(COLLECTION, "core1", "core_node1", "shard1", Replica.State.ACTIVE, 1);
 
       assertNotNull(overseer.getStats());
       assertTrue((overseer.getStats().getSuccessCount(OverseerAction.STATE.toLower())) > 0);
@@ -819,16 +708,17 @@ public class OverseerTest extends SolrTestCaseJ4 {
       for (int i = 0; i < atLeast(4); i++) {
         killCounter.incrementAndGet(); //for each round allow 1 kill
         mockController = new MockZKController(server.getZkAddress(), "node1");
-        mockController.publishState(COLLECTION, "core1", "node1", Replica.State.ACTIVE,1);
+        mockController.createCollection(COLLECTION, 1);
+        mockController.publishState(COLLECTION, "core1", "node1", "shard1", Replica.State.ACTIVE,1);
         if(mockController2!=null) {
           mockController2.close();
           mockController2 = null;
         }
-        mockController.publishState(COLLECTION, "core1", "node1",Replica.State.RECOVERING,1);
+        mockController.publishState(COLLECTION, "core1", "node1","shard1", Replica.State.RECOVERING,1);
         mockController2 = new MockZKController(server.getZkAddress(), "node2");
-        mockController.publishState(COLLECTION, "core1", "node1", Replica.State.ACTIVE,1);
+        mockController.publishState(COLLECTION, "core1", "node1","shard1", Replica.State.ACTIVE,1);
         verifyShardLeader(reader, COLLECTION, "shard1", "core1");
-        mockController2.publishState(COLLECTION, "core4", "node2", Replica.State.ACTIVE ,1);
+        mockController2.publishState(COLLECTION, "core4", "node2", "shard1",  Replica.State.ACTIVE ,1);
         mockController.close();
         mockController = null;
         verifyShardLeader(reader, COLLECTION, "shard1", "core4");
@@ -874,7 +764,8 @@ public class OverseerTest extends SolrTestCaseJ4 {
       
       overseerClient = electNewOverseer(server.getZkAddress());
 
-      mockController.publishState(COLLECTION, "core1", "core_node1", Replica.State.RECOVERING, 1);
+      mockController.createCollection(COLLECTION, 1);
+      mockController.publishState(COLLECTION, "core1", "core_node1", "shard1", Replica.State.RECOVERING, 1);
 
       waitForCollections(reader, "collection1");
 
@@ -885,7 +776,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       int version = getClusterStateVersion(controllerClient);
       
       mockController = new MockZKController(server.getZkAddress(), "node1");
-      mockController.publishState(COLLECTION, "core1", "core_node1", Replica.State.RECOVERING, 1);
+      mockController.publishState(COLLECTION, "core1", "core_node1","shard1", Replica.State.RECOVERING, 1);
 
       while (version == reader.getClusterState().getZkClusterStateVersion()) {
         Thread.sleep(100);
@@ -915,47 +806,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
   }
 
   @Test
-  public void testPlaceholders() throws Exception {
-    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
-    
-    ZkTestServer server = new ZkTestServer(zkDir);
-    
-    SolrZkClient controllerClient = null;
-    SolrZkClient overseerClient = null;
-    ZkStateReader reader = null;
-    MockZKController mockController = null;
-    
-    try {
-      server.run();
-      controllerClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
-      
-      AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
-      AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
-      ZkController.createClusterZkNodes(controllerClient);
-
-      reader = new ZkStateReader(controllerClient);
-      reader.createClusterStateWatchersAndUpdate();
-
-      mockController = new MockZKController(server.getZkAddress(), "node1");
-      
-      overseerClient = electNewOverseer(server.getZkAddress());
-
-      mockController.publishState(COLLECTION, "core1", "node1", Replica.State.RECOVERING, 12);
-
-      waitForCollections(reader, COLLECTION);
-      
-      assertEquals("Slicecount does not match", 12, reader.getClusterState().getSlices(COLLECTION).size());
-      
-    } finally {
-      close(overseerClient);
-      close(mockController);
-      close(controllerClient);
-      close(reader);
-      server.shutdown();
-    }
-  }
-
-  @Test
   @Ignore
   public void testPerformance() throws Exception {
     String zkDir = createTempDir("OverseerTest.testPerformance").toFile().getAbsolutePath();
@@ -1113,10 +963,17 @@ public class OverseerTest extends SolrTestCaseJ4 {
       reader.createClusterStateWatchersAndUpdate();
       //prepopulate work queue with some items to emulate previous overseer died before persisting state
       DistributedQueue queue = Overseer.getInternalWorkQueue(zkClient, new Overseer.Stats());
-      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
+
+      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
+          "name", COLLECTION,
+          ZkStateReader.REPLICATION_FACTOR, "1",
+          ZkStateReader.NUM_SHARDS_PROP, "1",
+          "createNodeSet", "");
+      queue.offer(Utils.toJSON(m));
+      m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
           ZkStateReader.NODE_NAME_PROP, "node1",
-          ZkStateReader.SHARD_ID_PROP, "s1",
+          ZkStateReader.SHARD_ID_PROP, "shard1",
           ZkStateReader.COLLECTION_PROP, COLLECTION,
           ZkStateReader.CORE_NAME_PROP, "core1",
           ZkStateReader.ROLES_PROP, "",
@@ -1125,7 +982,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "state",
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
           ZkStateReader.NODE_NAME_PROP, "node1",
-          ZkStateReader.SHARD_ID_PROP, "s1",
+          ZkStateReader.SHARD_ID_PROP, "shard1",
           ZkStateReader.COLLECTION_PROP, COLLECTION,
           ZkStateReader.CORE_NAME_PROP, "core2",
           ZkStateReader.ROLES_PROP, "",
@@ -1139,7 +996,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
           ZkStateReader.NODE_NAME_PROP, "node1",
-          ZkStateReader.SHARD_ID_PROP, "s1",
+          ZkStateReader.SHARD_ID_PROP, "shard1",
           ZkStateReader.COLLECTION_PROP, COLLECTION,
           ZkStateReader.CORE_NAME_PROP, "core3",
           ZkStateReader.ROLES_PROP, "",
@@ -1147,12 +1004,12 @@ public class OverseerTest extends SolrTestCaseJ4 {
       queue.offer(Utils.toJSON(m));
       
       for(int i=0;i<100;i++) {
-        Slice s = reader.getClusterState().getSlice(COLLECTION, "s1");
+        Slice s = reader.getClusterState().getSlice(COLLECTION, "shard1");
         if(s!=null && s.getReplicasMap().size()==3) break;
         Thread.sleep(100);
       }
-      assertNotNull(reader.getClusterState().getSlice(COLLECTION, "s1"));
-      assertEquals(3, reader.getClusterState().getSlice(COLLECTION, "s1").getReplicasMap().size());
+      assertNotNull(reader.getClusterState().getSlice(COLLECTION, "shard1"));
+      assertEquals(3, reader.getClusterState().getSlice(COLLECTION, "shard1").getReplicasMap().size());
     } finally {
       close(overseerClient);
       close(zkClient);
@@ -1188,8 +1045,17 @@ public class OverseerTest extends SolrTestCaseJ4 {
 
       DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
 
-      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
+
+      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
+          "name", "c1",
+          ZkStateReader.REPLICATION_FACTOR, "1",
+          ZkStateReader.NUM_SHARDS_PROP, "1",
+          "createNodeSet", "");
+      q.offer(Utils.toJSON(m));
+
+      m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
+          ZkStateReader.SHARD_ID_PROP, "shard1",
           ZkStateReader.NODE_NAME_PROP, "node1",
           ZkStateReader.COLLECTION_PROP, "c1",
           ZkStateReader.CORE_NAME_PROP, "core1",
@@ -1203,6 +1069,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
 
       m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
+          ZkStateReader.SHARD_ID_PROP, "shard1",
           ZkStateReader.NODE_NAME_PROP, "node1",
           ZkStateReader.COLLECTION_PROP, "c1",
           ZkStateReader.CORE_NAME_PROP, "core1",
@@ -1214,6 +1081,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
 
       m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
+          ZkStateReader.SHARD_ID_PROP, "shard1",
           ZkStateReader.NODE_NAME_PROP, "node1",
           ZkStateReader.COLLECTION_PROP, "c1",
           ZkStateReader.CORE_NAME_PROP, "core1",
@@ -1379,6 +1247,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
           final int N = (numReplicas-rr)*numShards + ss;
           ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
               ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
+              ZkStateReader.SHARD_ID_PROP, "shard"+ss,
               ZkStateReader.NODE_NAME_PROP, "node"+N,
               ZkStateReader.COLLECTION_PROP, COLLECTION,
               ZkStateReader.CORE_NAME_PROP, "core"+N,
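
The two tests deleted above, testShardAssignmentBigger and testPlaceholders, exercised automatic shard-id and slice creation by the Overseer, behavior tied to the legacy (legacyCloud=true) code path; with collections pre-created, every state message a mock controller publishes now names its shard explicitly. An abridged sketch of the message MockZKController.publishState enqueues after this change (only the shard-related properties shown):

    ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
        ZkStateReader.COLLECTION_PROP, collection,
        ZkStateReader.SHARD_ID_PROP, shard,              // new: the shard is stated explicitly
        ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName,
        ZkStateReader.STATE_PROP, stateName.toString());
    Overseer.getStateUpdateQueue(zkClient).offer(Utils.toJSON(m));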

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/ReplaceNodeTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ReplaceNodeTest.java b/solr/core/src/test/org/apache/solr/cloud/ReplaceNodeTest.java
index d7fae92..edbeb50 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ReplaceNodeTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ReplaceNodeTest.java
@@ -83,7 +83,7 @@ public class ReplaceNodeTest extends SolrCloudTestCase {
     new CollectionAdminRequest.ReplaceNode(node2bdecommissioned, emptyNode).processAsync("000", cloudClient);
     CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus("000");
     boolean success = false;
-    for (int i = 0; i < 200; i++) {
+    for (int i = 0; i < 300; i++) {
       CollectionAdminRequest.RequestStatusResponse rsp = requestStatus.process(cloudClient);
       if (rsp.getRequestStatus() == RequestStatusState.COMPLETED) {
         success = true;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java
index c5e35c3..78e4128 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardRoutingCustomTest.java
@@ -20,6 +20,8 @@ import java.io.File;
 
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.common.cloud.Replica;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -60,6 +62,16 @@ public class ShardRoutingCustomTest extends AbstractFullDistribZkTestBase {
     jettyDir.mkdirs();
     setupJettySolrHome(jettyDir);
     JettySolrRunner j = createJetty(jettyDir, createTempDir().toFile().getAbsolutePath(), "shardA", "solrconfig.xml", null);
+    assertEquals(0, CollectionAdminRequest
+        .createCollection(DEFAULT_COLLECTION, 1, 1)
+        .setStateFormat(Integer.parseInt(getStateFormat()))
+        .setCreateNodeSet("")
+        .process(cloudClient).getStatus());
+    assertTrue(CollectionAdminRequest
+        .addReplicaToShard(collection,"shard1")
+        .setNode(j.getNodeName())
+        .setType(useTlogReplicas()? Replica.Type.TLOG: Replica.Type.NRT)
+        .process(cloudClient).isSuccess());
     jettys.add(j);
     SolrClient client = createNewSolrClient(j.getLocalPort());
     clients.add(client);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
index b77389b..c51904f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
@@ -21,8 +21,9 @@ import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.Replica;
@@ -32,12 +33,12 @@ import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.core.SolrCore;
 import org.apache.solr.update.DirectUpdateHandler2;
 import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.apache.solr.util.TimeOut;
 import org.junit.Test;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Random;
@@ -108,37 +109,28 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
     final String coreName1 = collection+"_1";
     final String coreName2 = collection+"_2";
 
-    // create one leader and one replica
-    Create createCmd = new Create();
-    createCmd.setCoreName(coreName1);
-    createCmd.setCollection(collection);
-    String coreDataDir = createTempDir().toFile().getAbsolutePath();
-    createCmd.setDataDir(getDataDir(coreDataDir));
-    createCmd.setNumShards(numShards);
-    
-    SolrClient client = clients.get(0);
-    String url1 = getBaseUrl(client);
-
-    try (HttpSolrClient adminClient = getHttpSolrClient(url1)) {
-      adminClient.setConnectionTimeout(15000);
-      adminClient.setSoTimeout(60000);
-      adminClient.request(createCmd);
+    assertEquals(0, CollectionAdminRequest.createCollection(collection, numShards, 1)
+        .setCreateNodeSet("")
+        .process(cloudClient).getStatus());
+    assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard1")
+        .setCoreName(coreName1)
+        .setNode(jettys.get(0).getNodeName())
+        .process(cloudClient).isSuccess());
 
-      createCmd = new Create();
-      createCmd.setCoreName(coreName2);
-      createCmd.setCollection(collection);
-      coreDataDir = createTempDir().toFile().getAbsolutePath();
-      createCmd.setDataDir(getDataDir(coreDataDir));
+    assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard2")
+        .setCoreName(coreName2)
+        .setNode(jettys.get(0).getNodeName())
+        .process(cloudClient).isSuccess());
 
-      adminClient.request(createCmd);
 
-      // does not mean they are active and up yet :*
-      waitForRecoveriesToFinish(collection, false);
+    // does not mean they are active and up yet :*
+    waitForRecoveriesToFinish(collection, false);
 
-      final boolean unloadInOrder = random().nextBoolean();
-      final String unloadCmdCoreName1 = (unloadInOrder ? coreName1 : coreName2);
-      final String unloadCmdCoreName2 = (unloadInOrder ? coreName2 : coreName1);
+    final boolean unloadInOrder = random().nextBoolean();
+    final String unloadCmdCoreName1 = (unloadInOrder ? coreName1 : coreName2);
+    final String unloadCmdCoreName2 = (unloadInOrder ? coreName2 : coreName1);
 
+    try (HttpSolrClient adminClient = getHttpSolrClient(buildUrl(jettys.get(0).getLocalPort()))) {
       // now unload one of the two
       Unload unloadCmd = new Unload(false);
       unloadCmd.setCoreName(unloadCmdCoreName1);
@@ -163,42 +155,26 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
    * @throws Exception on any problem
    */
   private void testCoreUnloadAndLeaders() throws Exception {
-    File tmpDir = createTempDir().toFile();
-
-    String core1DataDir = tmpDir.getAbsolutePath() + File.separator + System.nanoTime() + "unloadcollection1" + "_1n";
+    JettySolrRunner jetty1 = jettys.get(0);
 
-    // create a new collection collection
-    SolrClient client = clients.get(0);
-    String url1 = getBaseUrl(client);
-    try (HttpSolrClient adminClient = getHttpSolrClient(url1)) {
-      adminClient.setConnectionTimeout(15000);
-      adminClient.setSoTimeout(60000);
-
-      Create createCmd = new Create();
-      createCmd.setCoreName("unloadcollection1");
-      createCmd.setCollection("unloadcollection");
-      createCmd.setNumShards(1);
-      createCmd.setDataDir(getDataDir(core1DataDir));
-      adminClient.request(createCmd);
-    }
+    assertEquals(0, CollectionAdminRequest
+        .createCollection("unloadcollection", 1,1)
+        .setCreateNodeSet(jetty1.getNodeName())
+        .process(cloudClient).getStatus());
     ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
     
     zkStateReader.forceUpdateCollection("unloadcollection");
 
     int slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size();
     assertEquals(1, slices);
-    
-    client = clients.get(1);
-    String url2 = getBaseUrl(client);
-    try (HttpSolrClient adminClient = getHttpSolrClient(url2)) {
-
-      Create createCmd = new Create();
-      createCmd.setCoreName("unloadcollection2");
-      createCmd.setCollection("unloadcollection");
-      String core2dataDir = tmpDir.getAbsolutePath() + File.separator + System.nanoTime() + "unloadcollection1" + "_2n";
-      createCmd.setDataDir(getDataDir(core2dataDir));
-      adminClient.request(createCmd);
-    }
+    SolrCore solrCore = jetty1.getCoreContainer().getCore("unloadcollection_shard1_replica1");
+    String core1DataDir = solrCore.getDataDir();
+    solrCore.close();
+
+    assertTrue(CollectionAdminRequest
+        .addReplicaToShard("unloadcollection", "shard1")
+        .setNode(jettys.get(1).getNodeName())
+        .process(cloudClient).isSuccess());
     zkStateReader.forceUpdateCollection("unloadcollection");
     slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size();
     assertEquals(1, slices);
@@ -225,24 +201,17 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
       }
     }
 
-    // create another replica for our collection
-    client = clients.get(2);
-    String url3 = getBaseUrl(client);
-    try (HttpSolrClient adminClient = getHttpSolrClient(url3)) {
-      Create createCmd = new Create();
-      createCmd.setCoreName("unloadcollection3");
-      createCmd.setCollection("unloadcollection");
-      String core3dataDir = tmpDir.getAbsolutePath() + File.separator + System.nanoTime() + "unloadcollection" + "_3n";
-      createCmd.setDataDir(getDataDir(core3dataDir));
-      adminClient.request(createCmd);
-    }
-    
+    assertTrue(CollectionAdminRequest
+        .addReplicaToShard("unloadcollection", "shard1")
+        .setNode(jettys.get(2).getNodeName())
+        .process(cloudClient).isSuccess());
+
     waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
-    
+
     // so that we start with some versions when we reload...
     DirectUpdateHandler2.commitOnClose = false;
-    
-    try (HttpSolrClient addClient = getHttpSolrClient(url3 + "/unloadcollection3")) {
+
+    try (HttpSolrClient addClient = getHttpSolrClient(jettys.get(2).getBaseUrl() + "/unloadcollection_shard1_replica3")) {
       addClient.setConnectionTimeout(30000);
 
       // add a few docs
@@ -276,11 +245,11 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
         fail("Leader never changed");
       }
     }
-    
+
     // ensure there is a leader
     zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);
-    
-    try (HttpSolrClient addClient = getHttpSolrClient(url2 + "/unloadcollection2")) {
+
+    try (HttpSolrClient addClient = getHttpSolrClient(jettys.get(1).getBaseUrl() + "/unloadcollection_shard1_replica2")) {
       addClient.setConnectionTimeout(30000);
       addClient.setSoTimeout(90000);
 
@@ -291,23 +260,14 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
         addClient.add(doc1);
       }
     }
-    
-    // create another replica for our collection
-    client = clients.get(3);
-    String url4 = getBaseUrl(client);
-    try (HttpSolrClient adminClient = getHttpSolrClient(url4)) {
-      adminClient.setConnectionTimeout(15000);
-      adminClient.setSoTimeout(30000);
 
-      Create createCmd = new Create();
-      createCmd.setCoreName("unloadcollection4");
-      createCmd.setCollection("unloadcollection");
-      String core4dataDir = tmpDir.getAbsolutePath() + File.separator + System.nanoTime() + "unloadcollection" + "_4n";
-      createCmd.setDataDir(getDataDir(core4dataDir));
-      adminClient.request(createCmd);
-    }
+    assertTrue(CollectionAdminRequest
+        .addReplicaToShard("unloadcollection", "shard1")
+        .setNode(jettys.get(3).getNodeName())
+        .process(cloudClient).isSuccess());
+
     waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
-    
+
     // unload the leader again
     leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");
     try (HttpSolrClient collectionClient = getHttpSolrClient(leaderProps.getBaseUrl())) {
@@ -326,29 +286,22 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
         fail("Leader never changed");
       }
     }
-    
+
     zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);
-    
-    
+
     // set this back
     DirectUpdateHandler2.commitOnClose = true;
-    
-    // bring the downed leader back as replica
-    try (HttpSolrClient adminClient = getHttpSolrClient(leaderProps.getBaseUrl())) {
-      adminClient.setConnectionTimeout(15000);
-      adminClient.setSoTimeout(30000);
+    assertTrue(CollectionAdminRequest
+        .addReplicaToShard("unloadcollection", "shard1")
+        .setDataDir(core1DataDir)
+        .setNode(leaderProps.getNodeName())
+        .process(cloudClient).isSuccess());
 
-      Create createCmd = new Create();
-      createCmd.setCoreName(leaderProps.getCoreName());
-      createCmd.setCollection("unloadcollection");
-      createCmd.setDataDir(getDataDir(core1DataDir));
-      adminClient.request(createCmd);
-    }
     waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
 
     long found1, found3;
-    
-    try (HttpSolrClient adminClient = getHttpSolrClient(url2 + "/unloadcollection")) {
+
+    try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(1).getBaseUrl() + "/unloadcollection_shard1_replica2")) {
       adminClient.setConnectionTimeout(15000);
       adminClient.setSoTimeout(30000);
       adminClient.commit();
@@ -356,7 +309,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
       q.set("distrib", false);
       found1 = adminClient.query(q).getResults().getNumFound();
     }
-    try (HttpSolrClient adminClient = getHttpSolrClient(url3 + "/unloadcollection")) {
+    try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(2).getBaseUrl() + "/unloadcollection_shard1_replica3")) {
       adminClient.setConnectionTimeout(15000);
       adminClient.setSoTimeout(30000);
       adminClient.commit();
@@ -365,7 +318,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
       found3 = adminClient.query(q).getResults().getNumFound();
     }
 
-    try (HttpSolrClient adminClient = getHttpSolrClient(url4 + "/unloadcollection")) {
+    try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(3).getBaseUrl() + "/unloadcollection_shard1_replica4")) {
       adminClient.setConnectionTimeout(15000);
       adminClient.setSoTimeout(30000);
       adminClient.commit();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java b/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
index 1839881..162584c 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
@@ -109,7 +109,7 @@ public class TestConfigReload extends AbstractFullDistribZkTestBase {
     assertTrue(newStat.getVersion() > stat.getVersion());
     log.info("new_version "+ newStat.getVersion());
     Integer newVersion = newStat.getVersion();
-    long maxTimeoutSeconds = 20;
+    long maxTimeoutSeconds = 60;
     DocCollection coll = cloudClient.getZkStateReader().getClusterState().getCollection("collection1");
     List<String> urls = new ArrayList<>();
     for (Slice slice : coll.getSlices()) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchema.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchema.java b/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchema.java
index 3c7a90e..2427f19 100644
--- a/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchema.java
+++ b/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchema.java
@@ -62,7 +62,7 @@ public class TestCloudManagedSchema extends AbstractFullDistribZkTestBase {
     NamedList namedListResponse = client.request(request);
     client.setBaseURL(previousBaseURL); // Restore baseURL 
     NamedList status = (NamedList)namedListResponse.get("status");
-    NamedList collectionStatus = (NamedList)status.get("collection1");
+    NamedList collectionStatus = (NamedList)status.getVal(0);
     String collectionSchema = (String)collectionStatus.get(CoreAdminParams.SCHEMA);
     // Make sure the upgrade to managed schema happened
     assertEquals("Schema resource name differs from expected name", "managed-schema", collectionSchema);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
index ed5b622..740de34 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
@@ -1360,6 +1360,7 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
     protected String collection;
     protected String shard;
     protected String node;
+    protected String coreName;
     protected String routeKey;
     protected String instanceDir;
     protected String dataDir;
@@ -1426,6 +1427,11 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
       return this;
     }
 
+    public AddReplica setCoreName(String coreName) {
+      this.coreName = coreName;
+      return this;
+    }
+
     @Override
     public SolrParams getParams() {
       ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
@@ -1446,6 +1452,9 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
       if (dataDir != null)  {
         params.add("dataDir", dataDir);
       }
+      if (coreName != null) {
+        params.add("name", coreName);
+      }
       if (type != null) {
         params.add(ZkStateReader.REPLICA_TYPE, type.name());
       }
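
The AddReplica request gains an optional core name so the updated tests can keep addressing replicas by their historical core names; a usage sketch mirroring the UnloadDistributedZkTest changes above (collection, coreName1, jettys, and cloudClient come from that test):

    assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard1")
        .setCoreName(coreName1)                      // sent as the "name" request parameter
        .setNode(jettys.get(0).getNodeName())
        .process(cloudClient).isSuccess());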

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java b/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
index ed778a6..c46d346 100644
--- a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
@@ -1116,20 +1116,7 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 {
   protected void setupJettySolrHome(File jettyHome) throws IOException {
     seedSolrHome(jettyHome);
 
-    Properties coreProperties = new Properties();
-    coreProperties.setProperty("name", "collection1");
-    coreProperties.setProperty("shard", "${shard:}");
-    coreProperties.setProperty("collection", "${collection:collection1}");
-    coreProperties.setProperty("config", "${solrconfig:solrconfig.xml}");
-    coreProperties.setProperty("schema", "${schema:schema.xml}");
-    coreProperties.setProperty("coreNodeName", "${coreNodeName:}");
-    coreProperties.setProperty("replicaType", "${replicaType:}");
-
-    writeCoreProperties(jettyHome.toPath().resolve("cores").resolve("collection1"), coreProperties, "collection1");
-
-     //   <core name="collection1" instanceDir="collection1" shard="${shard:}"
-     // collection="${collection:collection1}" config="${solrconfig:solrconfig.xml}" schema="${schema:schema.xml}"
-    //coreNodeName="${coreNodeName:}"/>
+    Files.createDirectories(jettyHome.toPath().resolve("cores").resolve("collection1"));
   }
 
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
index 7141eed..b7c4162 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
@@ -25,6 +25,7 @@ import org.apache.commons.io.FileUtils;
 import org.apache.solr.BaseDistributedSearchTestCase;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
@@ -98,20 +99,15 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
     File controlHome = new File(new File(getSolrHome()).getParentFile(), "control" + homeCount.incrementAndGet());
     FileUtils.copyDirectory(new File(getSolrHome()), controlHome);
     setupJettySolrHome(controlHome);
-    
-    System.setProperty("collection", "control_collection");
-    String numShardsS = System.getProperty(ZkStateReader.NUM_SHARDS_PROP);
-    System.setProperty(ZkStateReader.NUM_SHARDS_PROP, "1");
-    controlJetty = createJetty(controlHome, null);      // let the shardId default to shard1
-    System.clearProperty("collection");
-    if(numShardsS != null) {
-      System.setProperty(ZkStateReader.NUM_SHARDS_PROP, numShardsS);
-    } else {
-      System.clearProperty(ZkStateReader.NUM_SHARDS_PROP);
-    }
 
+    controlJetty = createJetty(controlHome, null);      // let the shardId default to shard1
     controlClient = createNewSolrClient(controlJetty.getLocalPort());
 
+    assertTrue(CollectionAdminRequest
+        .createCollection("control_collection", 1, 1)
+        .setCreateNodeSet(controlJetty.getNodeName())
+        .process(controlClient).isSuccess());
+
     StringBuilder sb = new StringBuilder();
     for (int i = 1; i <= numShards; i++) {
       if (sb.length() > 0) sb.append(',');

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index cf6f2e1..f3d9d8a 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -62,7 +62,6 @@ import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
@@ -234,9 +233,15 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
       System.clearProperty("urlScheme");
       try (ZkStateReader zkStateReader = new ZkStateReader(zkServer.getZkAddress(),
           AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT)) {
-        zkStateReader.getZkClient().create(ZkStateReader.CLUSTER_PROPS,
-            Utils.toJSON(Collections.singletonMap("urlScheme", "https")),
-            CreateMode.PERSISTENT, true);
+        try {
+          zkStateReader.getZkClient().create(ZkStateReader.CLUSTER_PROPS,
+              Utils.toJSON(Collections.singletonMap("urlScheme", "https")),
+              CreateMode.PERSISTENT, true);
+        } catch (KeeperException.NodeExistsException e) {
+          ZkNodeProps props = ZkNodeProps.load(zkStateReader.getZkClient().getData(ZkStateReader.CLUSTER_PROPS,
+              null, null, true));
+          zkStateReader.getZkClient().setData(ZkStateReader.CLUSTER_PROPS, Utils.toJSON(props.plus("urlScheme", "https")), true);
+        }
       }
     }
     if (useTlogReplicas()) {
@@ -300,48 +305,31 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
   @Override
   protected void createServers(int numServers) throws Exception {
 
-    System.setProperty("collection", "control_collection");
-
-    // we want hashes by default for the control, so set to 1 shard as opposed to leaving unset
-    String oldNumShards = System.getProperty(ZkStateReader.NUM_SHARDS_PROP);
-    System.setProperty(ZkStateReader.NUM_SHARDS_PROP, "1");
-
-    try {
-
-      File controlJettyDir = createTempDir("control").toFile();
-      setupJettySolrHome(controlJettyDir);
-
-      controlJetty = createJetty(controlJettyDir, useJettyDataDir ? getDataDir(testDir
-          + "/control/data") : null); // don't pass shard name... let it default to
-                               // "shard1"
-
-
-      controlClient = createNewSolrClient(controlJetty.getLocalPort());
-      
-      if (sliceCount <= 0) {
-        // for now, just create the cloud client for the control if we don't
-        // create the normal cloud client.
-        // this can change if more tests need it.
-        controlClientCloud = createCloudClient("control_collection");
-        controlClientCloud.connect();
-        waitForCollection(controlClientCloud.getZkStateReader(),
-            "control_collection", 0);
-        // NOTE: we are skipping creation of the chaos monkey by returning here
-        cloudClient = controlClientCloud; // temporary - some code needs/uses
-                                          // cloudClient
-        return;
-      }
-
-    } finally {
-      System.clearProperty("collection");
-      if (oldNumShards != null) {
-        System.setProperty(ZkStateReader.NUM_SHARDS_PROP, oldNumShards);
-      } else {
-        System.clearProperty(ZkStateReader.NUM_SHARDS_PROP);
+    File controlJettyDir = createTempDir("control").toFile();
+    setupJettySolrHome(controlJettyDir);
+    controlJetty = createJetty(controlJettyDir, useJettyDataDir ? getDataDir(testDir
+        + "/control/data") : null);
+    try (SolrClient client = createCloudClient("control_collection")) {
+      assertEquals(0, CollectionAdminRequest
+          .createCollection("control_collection", 1, 1)
+          .setCreateNodeSet(controlJetty.getNodeName())
+          .process(client).getStatus());
       }
+    controlClient = new HttpSolrClient.Builder(controlJetty.getBaseUrl() + "/control_collection").build();
+    if (sliceCount <= 0) {
+      // for now, just create the cloud client for the control if we don't
+      // create the normal cloud client.
+      // this can change if more tests need it.
+      controlClientCloud = createCloudClient("control_collection");
+      controlClientCloud.connect();
+      waitForCollection(controlClientCloud.getZkStateReader(),
+          "control_collection", 0);
+      // NOTE: we are skipping creation of the chaos monkey by returning here
+      cloudClient = controlClientCloud; // temporary - some code needs/uses
+      // cloudClient
+      return;
     }
 
-
     initCloud();
 
     createJettys(numServers);
@@ -390,24 +378,13 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     List<SolrClient> clients = new ArrayList<>();
     StringBuilder sb = new StringBuilder();
 
-    if ("2".equals(getStateFormat())) {
-      log.info("Creating " + DEFAULT_COLLECTION + " with stateFormat=2");
-      SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(),
-          AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT);
-      Overseer.getStateUpdateQueue(zkClient).offer(
-          Utils.toJSON(Utils.makeMap(Overseer.QUEUE_OPERATION,
-              CollectionParams.CollectionAction.CREATE.toLower(), 
-              "name", DEFAULT_COLLECTION, 
-              "numShards", String.valueOf(sliceCount),
-              DocCollection.STATE_FORMAT, getStateFormat(),
-              ZkStateReader.NRT_REPLICAS, useTlogReplicas()?"0":"1",
-              ZkStateReader.TLOG_REPLICAS, useTlogReplicas()?"1":"0",
-              ZkStateReader.PULL_REPLICAS, String.valueOf(getPullReplicaCount()))));
-      zkClient.close();
-    }
+    assertEquals(0, CollectionAdminRequest
+        .createCollection(DEFAULT_COLLECTION, sliceCount, 1)
+        .setStateFormat(Integer.parseInt(getStateFormat()))
+        .setCreateNodeSet("")
+        .process(cloudClient).getStatus());
     
-    int numPullReplicas = getPullReplicaCount() * sliceCount;
-
+    int numOtherReplicas = numJettys - getPullReplicaCount() * sliceCount;
     for (int i = 1; i <= numJettys; i++) {
       if (sb.length() > 0) sb.append(',');
       int cnt = this.jettyIntCntr.incrementAndGet();
@@ -417,20 +394,37 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
       jettyDir.mkdirs();
       setupJettySolrHome(jettyDir);
       JettySolrRunner j;
-      
-      if (numPullReplicas > 0) {
-        numPullReplicas--;
+
+      if (numOtherReplicas > 0) {
+        numOtherReplicas--;
+        if (useTlogReplicas()) {
+          log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.TLOG);
+          j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty"
+              + cnt) : null, null, "solrconfig.xml", null, Replica.Type.TLOG);
+          assertTrue(CollectionAdminRequest
+              .addReplicaToShard(DEFAULT_COLLECTION, "shard"+((i%sliceCount)+1))
+              .setNode(j.getNodeName())
+              .setType(Replica.Type.TLOG)
+              .process(cloudClient).isSuccess());
+        } else {
+          log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.NRT);
+          j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty"
+              + cnt) : null, null, "solrconfig.xml", null, null);
+          assertTrue(CollectionAdminRequest
+              .addReplicaToShard(DEFAULT_COLLECTION, "shard"+((i%sliceCount)+1))
+              .setNode(j.getNodeName())
+              .setType(Replica.Type.NRT)
+              .process(cloudClient).isSuccess());
+        }
+      } else {
         log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.PULL);
         j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty"
             + cnt) : null, null, "solrconfig.xml", null, Replica.Type.PULL);
-      } else if (useTlogReplicas()) {
-        log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.TLOG);
-        j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty"
-            + cnt) : null, null, "solrconfig.xml", null, Replica.Type.TLOG);
-      } else {
-        log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.NRT);
-        j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty"
-            + cnt) : null, null, "solrconfig.xml", null, null);
+        assertTrue(CollectionAdminRequest
+            .addReplicaToShard(DEFAULT_COLLECTION, "shard"+((i%sliceCount)+1))
+            .setNode(j.getNodeName())
+            .setType(Replica.Type.PULL)
+            .process(cloudClient).isSuccess());
       }
       jettys.add(j);
       SolrClient client = createNewSolrClient(j.getLocalPort());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java
index 4c63bfb..7461c4c 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java
@@ -19,6 +19,7 @@ package org.apache.solr.cloud;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.Utils;
 import org.apache.zookeeper.CreateMode;
 import org.junit.AfterClass;
@@ -28,6 +29,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.lang.invoke.MethodHandles;
+import java.nio.charset.StandardCharsets;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -97,7 +99,9 @@ public abstract class AbstractZkTestCase extends SolrTestCaseJ4 {
     zkClient.makePath("/collections/collection1/shards", CreateMode.PERSISTENT, true);
     zkClient.makePath("/collections/control_collection", Utils.toJSON(zkProps), CreateMode.PERSISTENT, true);
     zkClient.makePath("/collections/control_collection/shards", CreateMode.PERSISTENT, true);
-
+    // this workaround is acceptable until we remove legacyCloud because we just init a single core here
+    String defaultClusterProps = "{\""+ZkStateReader.LEGACY_CLOUD+"\":\"true\"}";
+    zkClient.makePath(ZkStateReader.CLUSTER_PROPS, defaultClusterProps.getBytes(StandardCharsets.UTF_8), CreateMode.PERSISTENT, true);
     // for now, always upload the config and schema to the canonical names
     putConfig("conf1", zkClient, solrhome, config, "solrconfig.xml");
     putConfig("conf1", zkClient, solrhome, schema, "schema.xml");