Posted to commits@lucene.apache.org by mi...@apache.org on 2015/01/04 15:53:21 UTC

svn commit: r1649347 [23/31] - in /lucene/dev/branches/lucene6005: ./ dev-tools/ dev-tools/idea/solr/contrib/dataimporthandler-extras/ dev-tools/idea/solr/contrib/extraction/ dev-tools/idea/solr/contrib/map-reduce/ dev-tools/idea/solr/contrib/velocity/...

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java Sun Jan  4 14:53:12 2015
@@ -18,9 +18,9 @@ package org.apache.solr.cloud;
  */
 
 import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest.RequestStatus;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest.SplitShard;
@@ -76,7 +76,7 @@ public class MultiThreadedOCPTest extend
   }
 
   private void testParallelCollectionAPICalls() throws IOException, SolrServerException {
-    SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
+    SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
 
     for(int i = 1 ; i <= NUM_COLLECTIONS ; i++) {
       Create createCollectionRequest = new Create();
@@ -84,7 +84,7 @@ public class MultiThreadedOCPTest extend
       createCollectionRequest.setNumShards(4);
       createCollectionRequest.setConfigName("conf1");
       createCollectionRequest.setAsyncId(String.valueOf(i));
-      createCollectionRequest.process(server);
+      createCollectionRequest.process(client);
     }
 
     boolean pass = false;
@@ -92,7 +92,7 @@ public class MultiThreadedOCPTest extend
     while(true) {
       int numRunningTasks = 0;
       for (int i = 1; i <= NUM_COLLECTIONS; i++)
-        if (getRequestState(i + "", server).equals("running"))
+        if (getRequestState(i + "", client).equals("running"))
           numRunningTasks++;
       if(numRunningTasks > 1) {
         pass = true;
@@ -107,38 +107,38 @@ public class MultiThreadedOCPTest extend
     }
     assertTrue("More than one tasks were supposed to be running in parallel but they weren't.", pass);
     for(int i=1;i<=NUM_COLLECTIONS;i++) {
-      String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
+      String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, client);
       assertTrue("Task " + i + " did not complete, final state: " + state,state.equals("completed"));
     }
   }
 
   private void testTaskExclusivity() throws IOException, SolrServerException {
-    SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
+    SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
     Create createCollectionRequest = new Create();
     createCollectionRequest.setCollectionName("ocptest_shardsplit");
     createCollectionRequest.setNumShards(4);
     createCollectionRequest.setConfigName("conf1");
     createCollectionRequest.setAsyncId("1000");
-    createCollectionRequest.process(server);
+    createCollectionRequest.process(client);
 
     SplitShard splitShardRequest = new SplitShard();
     splitShardRequest.setCollectionName("ocptest_shardsplit");
     splitShardRequest.setShardName(SHARD1);
     splitShardRequest.setAsyncId("1001");
-    splitShardRequest.process(server);
+    splitShardRequest.process(client);
 
     splitShardRequest = new SplitShard();
     splitShardRequest.setCollectionName("ocptest_shardsplit");
     splitShardRequest.setShardName(SHARD2);
     splitShardRequest.setAsyncId("1002");
-    splitShardRequest.process(server);
+    splitShardRequest.process(client);
 
     int iterations = 0;
     while(true) {
       int runningTasks = 0;
       int completedTasks = 0;
       for (int i=1001;i<=1002;i++) {
-        String state = getRequestState(i, server);
+        String state = getRequestState(i, client);
         if (state.equals("running"))
           runningTasks++;
         if (state.equals("completed"))
@@ -161,45 +161,45 @@ public class MultiThreadedOCPTest extend
       }
     }
     for (int i=1001;i<=1002;i++) {
-      String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
+      String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, client);
       assertTrue("Task " + i + " did not complete, final state: " + state,state.equals("completed"));
     }
   }
 
   private void testDeduplicationOfSubmittedTasks() throws IOException, SolrServerException {
-    SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
+    SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
     Create createCollectionRequest = new Create();
     createCollectionRequest.setCollectionName("ocptest_shardsplit2");
     createCollectionRequest.setNumShards(4);
     createCollectionRequest.setConfigName("conf1");
     createCollectionRequest.setAsyncId("3000");
-    createCollectionRequest.process(server);
+    createCollectionRequest.process(client);
 
     SplitShard splitShardRequest = new SplitShard();
     splitShardRequest.setCollectionName("ocptest_shardsplit2");
     splitShardRequest.setShardName(SHARD1);
     splitShardRequest.setAsyncId("3001");
-    splitShardRequest.process(server);
+    splitShardRequest.process(client);
 
     splitShardRequest = new SplitShard();
     splitShardRequest.setCollectionName("ocptest_shardsplit2");
     splitShardRequest.setShardName(SHARD2);
     splitShardRequest.setAsyncId("3002");
-    splitShardRequest.process(server);
+    splitShardRequest.process(client);
 
     // Now submit another task with the same id. At this time, hopefully the previous 3002 should still be in the queue.
     splitShardRequest = new SplitShard();
     splitShardRequest.setCollectionName("ocptest_shardsplit2");
     splitShardRequest.setShardName(SHARD1);
     splitShardRequest.setAsyncId("3002");
-    CollectionAdminResponse response = splitShardRequest.process(server);
+    CollectionAdminResponse response = splitShardRequest.process(client);
 
     NamedList r = response.getResponse();
     assertEquals("Duplicate request was supposed to exist but wasn't found. De-duplication of submitted task failed.",
         "Task with the same requestid already exists.", r.get("error"));
 
     for (int i=3001;i<=3002;i++) {
-      String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
+      String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, client);
       assertTrue("Task " + i + " did not complete, final state: " + state,state.equals("completed"));
     }
   }
@@ -224,16 +224,16 @@ public class MultiThreadedOCPTest extend
 
     try {
 
-      SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
+      SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
       SplitShard splitShardRequest = new SplitShard();
       splitShardRequest.setCollectionName("collection1");
       splitShardRequest.setShardName(SHARD1);
       splitShardRequest.setAsyncId("2000");
-      splitShardRequest.process(server);
+      splitShardRequest.process(client);
 
-      String state = getRequestState("2000", server);
+      String state = getRequestState("2000", client);
       while (state.equals("submitted")) {
-        state = getRequestState("2000", server);
+        state = getRequestState("2000", client);
         Thread.sleep(10);
       }
       assertTrue("SplitShard task [2000] was supposed to be in [running] but isn't. It is [" + state + "]", state.equals("running"));
@@ -246,9 +246,9 @@ public class MultiThreadedOCPTest extend
       SolrRequest request = new QueryRequest(params);
       request.setPath("/admin/collections");
 
-      server.request(request);
+      client.request(request);
 
-      state = getRequestState("2000", server);
+      state = getRequestState("2000", client);
 
       assertTrue("After invoking OVERSEERSTATUS, SplitShard task [2000] was still supposed to be in [running] but isn't." +
           "It is [" + state + "]", state.equals("running"));
@@ -267,13 +267,13 @@ public class MultiThreadedOCPTest extend
     // todo - target diff servers and use cloud clients as well as non-cloud clients
   }
 
-  private String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrServer server)
+  private String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrClient client)
       throws IOException, SolrServerException {
     String state = null;
     long maxWait = System.nanoTime() + TimeUnit.NANOSECONDS.convert(waitForSeconds, TimeUnit.SECONDS);
 
     while (System.nanoTime() < maxWait)  {
-      state = getRequestState(requestId, server);
+      state = getRequestState(requestId, client);
       if(state.equals("completed") || state.equals("failed"))
         return state;
       try {
@@ -285,14 +285,14 @@ public class MultiThreadedOCPTest extend
     return state;
   }
 
-  private String getRequestState(int requestId, SolrServer server) throws IOException, SolrServerException {
-    return getRequestState(String.valueOf(requestId), server);
+  private String getRequestState(int requestId, SolrClient client) throws IOException, SolrServerException {
+    return getRequestState(String.valueOf(requestId), client);
   }
 
-  private String getRequestState(String requestId, SolrServer server) throws IOException, SolrServerException {
+  private String getRequestState(String requestId, SolrClient client) throws IOException, SolrServerException {
     RequestStatus requestStatusRequest = new RequestStatus();
     requestStatusRequest.setRequestId(requestId);
-    CollectionAdminResponse response = requestStatusRequest.process(server);
+    CollectionAdminResponse response = requestStatusRequest.process(client);
 
     NamedList innerResponse = (NamedList) response.getResponse().get("status");
     return (String) innerResponse.get("state");
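
The hunks above are a mechanical rename of the SolrJ *Server classes to their *Client counterparts (SolrServer -> SolrClient, HttpSolrServer -> HttpSolrClient); the request-building logic itself is unchanged. A minimal standalone sketch of the renamed API, assuming a SolrJ 5.x classpath and using a hypothetical base URL, collection name, and async request id (none of which are part of this commit):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrServerException;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest.RequestStatus;
    import org.apache.solr.client.solrj.response.CollectionAdminResponse;
    import org.apache.solr.common.util.NamedList;

    import java.io.IOException;

    public class AsyncCollectionCreateSketch {
      public static void main(String[] args) throws IOException, SolrServerException {
        // HttpSolrClient replaces HttpSolrServer; the URL below is a placeholder.
        SolrClient client = new HttpSolrClient("http://localhost:8983/solr");

        Create create = new Create();
        create.setCollectionName("example_collection");   // placeholder collection name
        create.setNumShards(4);
        create.setConfigName("conf1");
        create.setAsyncId("42");                           // submit asynchronously
        create.process(client);                            // process(SolrClient) replaces process(SolrServer)

        // Poll the async task state the same way getRequestState() does above.
        RequestStatus status = new RequestStatus();
        status.setRequestId("42");
        CollectionAdminResponse rsp = status.process(client);
        NamedList inner = (NamedList) rsp.getResponse().get("status");
        System.out.println("async task state: " + inner.get("state"));

        client.shutdown();                                 // shutdown(), as these tests still use it
      }
    }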

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionProcessorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionProcessorTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionProcessorTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionProcessorTest.java Sun Jan  4 14:53:12 2015
@@ -48,6 +48,7 @@ import org.junit.Test;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -387,29 +388,24 @@ public class OverseerCollectionProcessor
   }
   
   protected void issueCreateJob(Integer numberOfSlices,
-      Integer replicationFactor, Integer maxShardsPerNode, List<String> createNodeList, boolean sendCreateNodeList) {
-    ZkNodeProps props;
+      Integer replicationFactor, Integer maxShardsPerNode, List<String> createNodeList, boolean sendCreateNodeList, boolean createNodeSetShuffle) {
+    Map<String,Object> propMap = ZkNodeProps.makeMap(
+        Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
+        ZkStateReader.REPLICATION_FACTOR, replicationFactor.toString(),
+        "name", COLLECTION_NAME,
+        "collection.configName", CONFIG_NAME,
+        OverseerCollectionProcessor.NUM_SLICES, numberOfSlices.toString(),
+        ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode.toString()
+    );
     if (sendCreateNodeList) {
-      props = new ZkNodeProps(Overseer.QUEUE_OPERATION,
-          CollectionParams.CollectionAction.CREATE.toLower(),
-          ZkStateReader.REPLICATION_FACTOR,
-          replicationFactor.toString(), "name", COLLECTION_NAME,
-          "collection.configName", CONFIG_NAME,
-          OverseerCollectionProcessor.NUM_SLICES, numberOfSlices.toString(),
-          ZkStateReader.MAX_SHARDS_PER_NODE,
-          maxShardsPerNode.toString(),
-          OverseerCollectionProcessor.CREATE_NODE_SET,
+      propMap.put(OverseerCollectionProcessor.CREATE_NODE_SET,
           (createNodeList != null)?StrUtils.join(createNodeList, ','):null);
-    } else {
-      props = new ZkNodeProps(Overseer.QUEUE_OPERATION,
-          CollectionParams.CollectionAction.CREATE.toLower(),
-          ZkStateReader.REPLICATION_FACTOR,
-          replicationFactor.toString(), "name", COLLECTION_NAME,
-          "collection.configName", CONFIG_NAME,
-          OverseerCollectionProcessor.NUM_SLICES, numberOfSlices.toString(),
-          ZkStateReader.MAX_SHARDS_PER_NODE,
-          maxShardsPerNode.toString());
+      if (OverseerCollectionProcessor.CREATE_NODE_SET_SHUFFLE_DEFAULT != createNodeSetShuffle || random().nextBoolean()) {
+        propMap.put(OverseerCollectionProcessor.CREATE_NODE_SET_SHUFFLE, createNodeSetShuffle);
+      }
     }
+
+    ZkNodeProps props = new ZkNodeProps(propMap);
     QueueEvent qe = new QueueEvent("id", ZkStateReader.toJSON(props), null){
       @Override
       public void setBytes(byte[] bytes) {
@@ -420,7 +416,7 @@ public class OverseerCollectionProcessor
   }
   
   protected void verifySubmitCaptures(List<SubmitCapture> submitCaptures,
-      Integer numberOfSlices, Integer numberOfReplica, Collection<String> createNodes) {
+      Integer numberOfSlices, Integer numberOfReplica, Collection<String> createNodes, boolean dontShuffleCreateNodeSet) {
     List<String> coreNames = new ArrayList<>();
     Map<String,Map<String,Integer>> sliceToNodeUrlsWithoutProtocolPartToNumberOfShardsRunningMapMap = new HashMap<>();
     List<String> nodeUrlWithoutProtocolPartForLiveNodes = new ArrayList<>(
@@ -431,6 +427,7 @@ public class OverseerCollectionProcessor
           .substring(7);
       nodeUrlWithoutProtocolPartForLiveNodes.add(nodeUrlWithoutProtocolPart);
     }
+    final Map<String,String> coreName_TO_nodeUrlWithoutProtocolPartForLiveNodes_map = new HashMap<>();
     
     for (SubmitCapture submitCapture : submitCaptures) {
       ShardRequest shardRequest = submitCapture.shardRequestCapture.getValue();
@@ -456,6 +453,7 @@ public class OverseerCollectionProcessor
           + shardRequest.shards[0],
           nodeUrlWithoutProtocolPartForLiveNodes
               .contains(shardRequest.shards[0]));
+      coreName_TO_nodeUrlWithoutProtocolPartForLiveNodes_map.put(coreName, shardRequest.shards[0]);
       assertEquals(shardRequest.shards, shardRequest.actualShards);
       
       String sliceName = shardRequest.params.get(CoreAdminParams.SHARD);
@@ -481,6 +479,16 @@ public class OverseerCollectionProcessor
         String coreName = COLLECTION_NAME + "_shard" + i + "_replica" + j;
         assertTrue("Shard " + coreName + " was not created",
             coreNames.contains(coreName));
+        
+        if (dontShuffleCreateNodeSet) {
+          final String expectedNodeName = nodeUrlWithoutProtocolPartForLiveNodes.get((numberOfReplica * (i - 1) + (j - 1)) % nodeUrlWithoutProtocolPartForLiveNodes.size());
+          assertFalse("expectedNodeName is null for coreName="+coreName, null == expectedNodeName);
+          
+          final String actualNodeName = coreName_TO_nodeUrlWithoutProtocolPartForLiveNodes_map.get(coreName);
+          assertFalse("actualNodeName is null for coreName="+coreName, null == actualNodeName);
+
+          assertTrue("node name mismatch for coreName="+coreName+" ( actual="+actualNodeName+" versus expected="+expectedNodeName+" )", actualNodeName.equals(expectedNodeName));
+        }
       }
     }
     
@@ -569,6 +577,8 @@ public class OverseerCollectionProcessor
       }
     }
     
+    if (random().nextBoolean()) Collections.shuffle(createNodeList, OverseerCollectionProcessor.RANDOM);
+    
     List<SubmitCapture> submitCaptures = null;
     if (collectionExceptedToBeCreated) {
       submitCaptures = mockShardHandlerForCreateJob(numberOfSlices,
@@ -587,7 +597,10 @@ public class OverseerCollectionProcessor
 
     startComponentUnderTest();
     
-    issueCreateJob(numberOfSlices, replicationFactor, maxShardsPerNode, (createNodeListOption != CreateNodeListOptions.SEND_NULL) ? createNodeList : null, (createNodeListOption != CreateNodeListOptions.DONT_SEND));
+    final List<String> createNodeListToSend = ((createNodeListOption != CreateNodeListOptions.SEND_NULL) ? createNodeList : null);
+    final boolean sendCreateNodeList = (createNodeListOption != CreateNodeListOptions.DONT_SEND);
+    final boolean dontShuffleCreateNodeSet = (createNodeListToSend != null) && sendCreateNodeList && random().nextBoolean();
+    issueCreateJob(numberOfSlices, replicationFactor, maxShardsPerNode, createNodeListToSend, sendCreateNodeList, !dontShuffleCreateNodeSet);
     waitForEmptyQueue(10000);
     
     if (collectionExceptedToBeCreated) {
@@ -598,11 +611,10 @@ public class OverseerCollectionProcessor
 
     if (collectionExceptedToBeCreated) {
       verifySubmitCaptures(submitCaptures, numberOfSlices, replicationFactor,
-          createNodeList);
+          createNodeList, dontShuffleCreateNodeSet);
     }
   }
-  
-  @Test
+    @Test
   public void testNoReplicationEqualNumberOfSlicesPerNode() throws Exception {
     Integer numberOfNodes = 4;
     Integer numberOfNodesToCreateOn = 4;
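
For reference, the issueCreateJob() change above replaces two near-duplicate ZkNodeProps varargs constructions with a single map built by ZkNodeProps.makeMap, to which the optional createNodeSet keys are added only when they are meant to be sent. A condensed sketch of that pattern, where sendCreateNodeList, createNodeList and createNodeSetShuffle stand for the method parameters from the hunk and the remaining values are placeholders:

    Map<String,Object> propMap = ZkNodeProps.makeMap(
        Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
        ZkStateReader.REPLICATION_FACTOR, "2",                    // placeholder
        "name", "example_collection",                             // placeholder
        "collection.configName", "conf1",
        OverseerCollectionProcessor.NUM_SLICES, "4",
        ZkStateReader.MAX_SHARDS_PER_NODE, "2");
    if (sendCreateNodeList) {
      // optional keys only go into the message when the caller asks for them
      propMap.put(OverseerCollectionProcessor.CREATE_NODE_SET,
          (createNodeList != null) ? StrUtils.join(createNodeList, ',') : null);
      propMap.put(OverseerCollectionProcessor.CREATE_NODE_SET_SHUFFLE, createNodeSetShuffle);
    }
    ZkNodeProps props = new ZkNodeProps(propMap);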

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java Sun Jan  4 14:53:12 2015
@@ -36,7 +36,7 @@ import org.apache.lucene.util.LuceneTest
 import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.cloud.overseer.OverseerAction;
 import org.apache.solr.common.cloud.SolrZkClient;
@@ -53,7 +53,7 @@ import org.junit.BeforeClass;
 @LuceneTestCase.Slow
 @SuppressSSL     // See SOLR-5776
 public class OverseerRolesTest  extends AbstractFullDistribZkTestBase{
-  private CloudSolrServer client;
+  private CloudSolrClient client;
 
   @BeforeClass
   public static void beforeThisClass2() throws Exception {
@@ -228,10 +228,10 @@ public class OverseerRolesTest  extends
   }
 
 
-  protected void createCollection(String COLL_NAME, CloudSolrServer client) throws Exception {
+  protected void createCollection(String COLL_NAME, CloudSolrClient client) throws Exception {
     int replicationFactor = 2;
     int numShards = 4;
-    int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrServer()
+    int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
         .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
 
     Map<String, Object> props = makeMap(

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java Sun Jan  4 14:53:12 2015
@@ -19,7 +19,7 @@ package org.apache.solr.cloud;
 
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.common.cloud.DocRouter;

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java Sun Jan  4 14:53:12 2015
@@ -17,14 +17,16 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import java.util.ArrayList;
-import java.util.List;
-
 import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
 
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.internal.matchers.StringContains.containsString;
+
 /**
  * Verify that remote (proxied) queries return proper error messages
  */
@@ -54,17 +56,17 @@ public class RemoteQueryErrorTest extend
     checkForCollection("collection2", numShardsNumReplicaList, null);
     waitForRecoveriesToFinish("collection2", true);
 
-    for (SolrServer solrServer : clients) {
+    for (SolrClient solrClient : clients) {
       try {
         SolrInputDocument emptyDoc = new SolrInputDocument();
-        solrServer.add(emptyDoc);
+        solrClient.add(emptyDoc);
         fail("Expected unique key exceptoin");
       } catch (SolrException ex) {
-        assertEquals("Document is missing mandatory uniqueKey field: id", ex.getMessage());
+        assertThat(ex.getMessage(), containsString("Document is missing mandatory uniqueKey field: id"));
       } catch(Exception ex) {
         fail("Expected a SolrException to occur, instead received: " + ex.getClass());
       } finally {
-        solrServer.shutdown();
+        solrClient.shutdown();
       }
     }
   }
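
The assertion change above swaps an exact-match assertEquals for a Hamcrest substring matcher, so a remote (proxied) node may wrap the error text with its own context without breaking the test. A small self-contained sketch of the same pattern, assuming JUnit 4 on the classpath; the wrapping prefix in the example message is hypothetical:

    import org.junit.Test;

    import static org.junit.Assert.assertThat;
    import static org.junit.internal.matchers.StringContains.containsString;

    public class ContainsStringAssertionSketch {
      @Test
      public void remoteErrorMessageMayBeWrapped() {
        // A proxied node can prepend its own context to the original error message.
        String remoteMessage = "Error from server at http://127.0.0.1:8983/solr: "
            + "Document is missing mandatory uniqueKey field: id";
        // assertEquals against the bare message would fail here; a substring match does not.
        assertThat(remoteMessage, containsString("Document is missing mandatory uniqueKey field: id"));
      }
    }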

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java Sun Jan  4 14:53:12 2015
@@ -17,29 +17,30 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
 import org.apache.commons.lang.StringUtils;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
 import org.apache.zookeeper.KeeperException;
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
 // Collect useful operations for testing assigning properties to individual replicas
 // Could probably expand this to do something creative with getting random slices
 // and shards, but for now this will do.
 public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBase {
 
-  void doPropertyAction(CloudSolrServer client, String... paramsIn) throws IOException, SolrServerException {
+  public static NamedList<Object> doPropertyAction(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
     assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
     ModifiableSolrParams params = new ModifiableSolrParams();
     for (int idx = 0; idx < paramsIn.length; idx += 2) {
@@ -47,11 +48,10 @@ public abstract class ReplicaPropertiesB
     }
     QueryRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
-    client.request(request);
-
+    return client.request(request);
   }
 
-  void verifyPropertyNotPresent(CloudSolrServer client, String collectionName, String replicaName,
+  public static void verifyPropertyNotPresent(CloudSolrClient client, String collectionName, String replicaName,
                                 String property)
       throws KeeperException, InterruptedException {
     ClusterState clusterState = null;
@@ -76,7 +76,7 @@ public abstract class ReplicaPropertiesB
   // collection
   // shard
   // replica
-  void verifyPropertyVal(CloudSolrServer client, String collectionName,
+  public static void verifyPropertyVal(CloudSolrClient client, String collectionName,
                          String replicaName, String property, String val)
       throws InterruptedException, KeeperException {
     Replica replica = null;
@@ -102,16 +102,17 @@ public abstract class ReplicaPropertiesB
   // Verify that
   // 1> the property is only set once in all the replicas in a slice.
   // 2> the property is balanced evenly across all the nodes hosting collection
-  void verifyUniqueAcrossCollection(CloudSolrServer client, String collectionName,
+  public static void verifyUniqueAcrossCollection(CloudSolrClient client, String collectionName,
                                     String property) throws KeeperException, InterruptedException {
     verifyUnique(client, collectionName, property, true);
   }
 
-  void verifyUniquePropertyWithinCollection(CloudSolrServer client, String collectionName,
+  public static void verifyUniquePropertyWithinCollection(CloudSolrClient client, String collectionName,
                             String property) throws KeeperException, InterruptedException {
     verifyUnique(client, collectionName, property, false);
   }
-  void verifyUnique(CloudSolrServer client, String collectionName, String property, boolean balanced)
+
+  public static void verifyUnique(CloudSolrClient client, String collectionName, String property, boolean balanced)
       throws KeeperException, InterruptedException {
 
     DocCollection col = null;

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java Sun Jan  4 14:53:12 2015
@@ -31,7 +31,7 @@ import org.apache.lucene.util.LuceneTest
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.Replica;
@@ -55,9 +55,7 @@ public class ReplicationFactorTest exten
   
   private static final transient Logger log = 
       LoggerFactory.getLogger(ReplicationFactorTest.class);
-  
-  private Map<URI,SocketProxy> proxies = new HashMap<URI,SocketProxy>();
-  
+
   public ReplicationFactorTest() {
     super();
     sliceCount = 3;
@@ -103,25 +101,8 @@ public class ReplicationFactorTest exten
   public JettySolrRunner createJetty(File solrHome, String dataDir,
       String shardList, String solrConfigOverride, String schemaOverride)
       throws Exception {
-    
-    JettySolrRunner jetty = new JettySolrRunner(solrHome.getPath(), context,
-        0, solrConfigOverride, schemaOverride, false,
-        getExtraServlets(), sslConfig, getExtraRequestFilters());
-    jetty.setShards(shardList);
-    jetty.setDataDir(getDataDir(dataDir));
-    
-    // setup to proxy Http requests to this server unless it is the control
-    // server
-    int proxyPort = getNextAvailablePort();
-    jetty.setProxyPort(proxyPort);
-    
-    jetty.start();
-    
-    // create a socket proxy for the jetty server ...
-    SocketProxy proxy = new SocketProxy(proxyPort, jetty.getBaseUrl().toURI());
-    proxies.put(proxy.getUrl(), proxy);
-    
-    return jetty;
+
+    return createProxiedJetty(solrHome, dataDir, shardList, solrConfigOverride, schemaOverride);
   }
   
   protected int getNextAvailablePort() throws Exception {    
@@ -205,11 +186,11 @@ public class ReplicationFactorTest exten
   
   @SuppressWarnings("rawtypes")
   protected void sendNonDirectUpdateRequestReplica(Replica replica, UpdateRequest up, int expectedRf, String collection) throws Exception {
-    HttpSolrServer solrServer = null;
+    HttpSolrClient solrServer = null;
     try {
       ZkCoreNodeProps zkProps = new ZkCoreNodeProps(replica);
       String url = zkProps.getBaseUrl() + "/" + collection;
-      solrServer = new HttpSolrServer(url);    
+      solrServer = new HttpSolrClient(url);
             
       NamedList resp = solrServer.request(up);
       NamedList hdr = (NamedList) resp.get("responseHeader");
@@ -320,21 +301,7 @@ public class ReplicationFactorTest exten
     Thread.sleep(2000);
     ensureAllReplicasAreActive(testCollectionName, shardId, numShards, replicationFactor, 30);    
   } 
-    
-  protected SocketProxy getProxyForReplica(Replica replica) throws Exception {
-    String replicaBaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
-    assertNotNull(replicaBaseUrl);
-    URL baseUrl = new URL(replicaBaseUrl);
-    
-    SocketProxy proxy = proxies.get(baseUrl.toURI());
-    if (proxy == null && !baseUrl.toExternalForm().endsWith("/")) {
-      baseUrl = new URL(baseUrl.toExternalForm() + "/");
-      proxy = proxies.get(baseUrl.toURI());
-    }
-    assertNotNull("No proxy found for " + baseUrl + "!", proxy);
-    return proxy;
-  }
-      
+
   protected int sendDoc(int docId, int minRf) throws Exception {
     UpdateRequest up = new UpdateRequest();
     up.setParam(UpdateRequest.MIN_REPFACT, String.valueOf(minRf));

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java Sun Jan  4 14:53:12 2015
@@ -29,7 +29,7 @@ import org.apache.commons.lang.StringUti
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.client.solrj.impl.LBHttpSolrServer;
+import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
@@ -115,7 +115,7 @@ public class SSLMigrationTest extends Ab
       urls.add(replica.getStr(ZkStateReader.BASE_URL_PROP));
     }
     //Create new SolrServer to configure new HttpClient w/ SSL config
-    new LBHttpSolrServer(urls.toArray(new String[]{})).request(request);
+    new LBHttpSolrClient(urls.toArray(new String[]{})).request(request);
   }
   
 }
\ No newline at end of file

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java Sun Jan  4 14:53:12 2015
@@ -18,10 +18,10 @@ package org.apache.solr.cloud;
  */
 
 import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
@@ -313,7 +313,7 @@ public class ShardRoutingTest extends Ab
     assertEquals(8, nClients);
 
     int expectedVal = 0;
-    for (SolrServer client : clients) {
+    for (SolrClient client : clients) {
       client.add(sdoc("id", "b!doc", "foo_i", map("inc",1)));
       expectedVal++;
 

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java Sun Jan  4 14:53:12 2015
@@ -18,12 +18,13 @@ package org.apache.solr.cloud;
  */
 
 import org.apache.http.params.CoreConnectionPNames;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServer;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
@@ -42,7 +43,6 @@ import org.junit.After;
 import org.junit.Before;
 
 import java.io.IOException;
-import java.net.MalformedURLException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -51,10 +51,9 @@ import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 
-import org.apache.lucene.util.LuceneTestCase.Slow;
 import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
 import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
 
 @Slow
 public class ShardSplitTest extends BasicDistributedZkTest {
@@ -126,7 +125,7 @@ public class ShardSplitTest extends Basi
     try {
       splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
       fail("Shard splitting with just one custom hash range should not succeed");
-    } catch (HttpSolrServer.RemoteSolrException e) {
+    } catch (HttpSolrClient.RemoteSolrException e) {
       log.info("Expected exception:", e);
     }
     subRanges.clear();
@@ -137,7 +136,7 @@ public class ShardSplitTest extends Basi
     try {
       splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
       fail("Shard splitting with missing hashes in between given ranges should not succeed");
-    } catch (HttpSolrServer.RemoteSolrException e) {
+    } catch (HttpSolrClient.RemoteSolrException e) {
       log.info("Expected exception:", e);
     }
     subRanges.clear();
@@ -150,7 +149,7 @@ public class ShardSplitTest extends Basi
     try {
       splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
       fail("Shard splitting with overlapping ranges should not succeed");
-    } catch (HttpSolrServer.RemoteSolrException e) {
+    } catch (HttpSolrClient.RemoteSolrException e) {
       log.info("Expected exception:", e);
     }
     subRanges.clear();
@@ -220,7 +219,7 @@ public class ShardSplitTest extends Basi
           log.info("Layout after split: \n");
           printLayout();
           break;
-        } catch (HttpSolrServer.RemoteSolrException e) {
+        } catch (HttpSolrClient.RemoteSolrException e) {
           if (e.code() != 500)  {
             throw e;
           }
@@ -248,11 +247,11 @@ public class ShardSplitTest extends Basi
     String collectionName = "routeFieldColl";
     int numShards = 4;
     int replicationFactor = 2;
-    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
+    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
         .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
 
     HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
-    CloudSolrServer client = null;
+    CloudSolrClient client = null;
     String shard_fld = "shard_s";
     try {
       client = createCloudClient(null);
@@ -272,9 +271,9 @@ public class ShardSplitTest extends Basi
 
     waitForRecoveriesToFinish(false);
 
-    String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
+    String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
 
-    HttpSolrServer collectionClient = new HttpSolrServer(url);
+    HttpSolrClient collectionClient = new HttpSolrClient(url);
 
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
     final DocRouter router = clusterState.getCollection(collectionName).getRouter();
@@ -304,7 +303,7 @@ public class ShardSplitTest extends Basi
       try {
         splitShard(collectionName, SHARD1, null, null);
         break;
-      } catch (HttpSolrServer.RemoteSolrException e) {
+      } catch (HttpSolrClient.RemoteSolrException e) {
         if (e.code() != 500) {
           throw e;
         }
@@ -327,11 +326,11 @@ public class ShardSplitTest extends Basi
     String collectionName = "splitByRouteKeyTest";
     int numShards = 4;
     int replicationFactor = 2;
-    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
+    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
         .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
 
     HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
-    CloudSolrServer client = null;
+    CloudSolrClient client = null;
     try {
       client = createCloudClient(null);
       Map<String, Object> props = ZkNodeProps.makeMap(
@@ -349,9 +348,9 @@ public class ShardSplitTest extends Basi
 
     waitForRecoveriesToFinish(false);
 
-    String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
+    String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
 
-    HttpSolrServer collectionClient = new HttpSolrServer(url);
+    HttpSolrClient collectionClient = new HttpSolrClient(url);
 
     String splitKey = "b!";
 
@@ -389,7 +388,7 @@ public class ShardSplitTest extends Basi
       try {
         splitShard(collectionName, null, null, splitKey);
         break;
-      } catch (HttpSolrServer.RemoteSolrException e) {
+      } catch (HttpSolrClient.RemoteSolrException e) {
         if (e.code() != 500) {
           throw e;
         }
@@ -447,23 +446,23 @@ public class ShardSplitTest extends Basi
     query.set("distrib", false);
 
     ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
-    HttpSolrServer shard1_0Server = new HttpSolrServer(shard1_0.getCoreUrl());
+    HttpSolrClient shard1_0Client = new HttpSolrClient(shard1_0.getCoreUrl());
     QueryResponse response;
     try {
-      response = shard1_0Server.query(query);
+      response = shard1_0Client.query(query);
     } finally {
-      shard1_0Server.shutdown();
+      shard1_0Client.shutdown();
     }
     long shard10Count = response.getResults().getNumFound();
 
     ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(
         AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
-    HttpSolrServer shard1_1Server = new HttpSolrServer(shard1_1.getCoreUrl());
+    HttpSolrClient shard1_1Client = new HttpSolrClient(shard1_1.getCoreUrl());
     QueryResponse response2;
     try {
-      response2 = shard1_1Server.query(query);
+      response2 = shard1_1Client.query(query);
     } finally {
-      shard1_1Server.shutdown();
+      shard1_1Client.shutdown();
     }
     long shard11Count = response2.getResults().getNumFound();
 
@@ -483,12 +482,12 @@ public class ShardSplitTest extends Basi
     int c = 0;
     for (Replica replica : slice.getReplicas()) {
       String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
-      HttpSolrServer server = new HttpSolrServer(coreUrl);
+      HttpSolrClient client = new HttpSolrClient(coreUrl);
       QueryResponse response;
       try {
-        response = server.query(query);
+        response = client.query(query);
       } finally {
-        server.shutdown();
+        client.shutdown();
       }
       numFound[c++] = response.getResults().getNumFound();
       log.info("Shard: " + shard + " Replica: {} has {} docs", coreUrl, String.valueOf(response.getResults().getNumFound()));
@@ -522,15 +521,15 @@ public class ShardSplitTest extends Basi
     SolrRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
 
-    String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
+    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
         .getBaseURL();
     baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
 
-    HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
-    baseServer.setConnectionTimeout(30000);
-    baseServer.setSoTimeout(60000 * 5);
-    baseServer.request(request);
-    baseServer.shutdown();
+    HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
+    baseClient.setConnectionTimeout(30000);
+    baseClient.setSoTimeout(60000 * 5);
+    baseClient.request(request);
+    baseClient.shutdown();
   }
 
   protected void indexAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id, int n) throws Exception {
@@ -600,23 +599,23 @@ public class ShardSplitTest extends Basi
   }
 
   @Override
-  protected SolrServer createNewSolrServer(String collection, String baseUrl) {
-    HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(collection, baseUrl);
-    server.setSoTimeout(5 * 60 * 1000);
-    return server;
+  protected SolrClient createNewSolrClient(String collection, String baseUrl) {
+    HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(collection, baseUrl);
+    client.setSoTimeout(5 * 60 * 1000);
+    return client;
   }
 
   @Override
-  protected SolrServer createNewSolrServer(int port) {
-    HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(port);
-    server.setSoTimeout(5 * 60 * 1000);
-    return server;
+  protected SolrClient createNewSolrClient(int port) {
+    HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(port);
+    client.setSoTimeout(5 * 60 * 1000);
+    return client;
   }
 
   @Override
-  protected CloudSolrServer createCloudClient(String defaultCollection) {
-    CloudSolrServer client = super.createCloudClient(defaultCollection);
-    client.getLbServer().getHttpClient().getParams().setParameter(CoreConnectionPNames.SO_TIMEOUT, 5 * 60 * 1000);
+  protected CloudSolrClient createCloudClient(String defaultCollection) {
+    CloudSolrClient client = super.createCloudClient(defaultCollection);
+    client.getLbClient().getHttpClient().getParams().setParameter(CoreConnectionPNames.SO_TIMEOUT, 5 * 60 * 1000);
     return client;
   }
 }

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java Sun Jan  4 14:53:12 2015
@@ -19,6 +19,7 @@ package org.apache.solr.cloud;
 
 import java.util.Collection;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ExecutorCompletionService;
@@ -27,25 +28,29 @@ import java.util.concurrent.SynchronousQ
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.lucene.util.LuceneTestCase.Nightly;
+import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
+import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.cloud.hdfs.HdfsTestUtil;
 import org.apache.solr.common.cloud.ClusterStateUtil;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
 
 @Nightly
 @Slow
@@ -109,7 +114,7 @@ public class SharedFSAutoReplicaFailover
       }
     }
   }
-  
+
   // very slow tests, especially since jetty is started and stopped
   // serially
   private void testBasics() throws Exception {
@@ -137,25 +142,25 @@ public class SharedFSAutoReplicaFailover
     createCollectionRequest.setConfigName("conf1");
     createCollectionRequest.setRouterField("myOwnField");
     createCollectionRequest.setAutoAddReplicas(false);
-    CollectionAdminResponse response2 = createCollectionRequest.process(getCommonCloudSolrServer());
+    CollectionAdminResponse response2 = createCollectionRequest.process(getCommonCloudSolrClient());
 
     assertEquals(0, response2.getStatus());
     assertTrue(response2.isSuccess());
     
     waitForRecoveriesToFinish(collection2, false);
-    
+
     ChaosMonkey.stop(jettys.get(1));
     ChaosMonkey.stop(jettys.get(2));
-    
+
     Thread.sleep(3000);
-    
+
     assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLive(cloudClient.getZkStateReader(), collection1, 120000));
-    
+
     assertSliceAndReplicaCount(collection1);
-    
+
     assertEquals(4, getLiveAndActiveCount(collection1));
     assertTrue(getLiveAndActiveCount(collection2) < 4);
-    
+
     ChaosMonkey.stop(jettys);
     ChaosMonkey.stop(controlJetty);
 
@@ -163,18 +168,47 @@ public class SharedFSAutoReplicaFailover
 
     ChaosMonkey.start(jettys);
     ChaosMonkey.start(controlJetty);
-    
+
     assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLive(cloudClient.getZkStateReader(), collection1, 120000));
 
     assertSliceAndReplicaCount(collection1);
-    
-    
+
     int jettyIndex = random().nextInt(jettys.size());
     ChaosMonkey.stop(jettys.get(jettyIndex));
     ChaosMonkey.start(jettys.get(jettyIndex));
-    
+
+    assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLive(cloudClient.getZkStateReader(), collection1, 60000));
+
+    //disable autoAddReplicas
+    Map m = makeMap(
+        "action", CollectionParams.CollectionAction.CLUSTERPROP.toLower(),
+        "name", ZkStateReader.AUTO_ADD_REPLICAS,
+        "val", "false");
+
+    SolrRequest request = new QueryRequest(new MapSolrParams(m));
+    request.setPath("/admin/collections");
+    cloudClient.request(request);
+
+    int currentCount = getLiveAndActiveCount(collection1);
+
+    ChaosMonkey.stop(jettys.get(3));
+
+    //solr-no-core.xml has defined workLoopDelay=10s and waitAfterExpiration=10s
+    //Hence waiting for 30 seconds to be on the safe side.
+    Thread.sleep(30000);
+    //Ensures that autoAddReplicas has not kicked in.
+    assertTrue(currentCount > getLiveAndActiveCount(collection1));
+
+    //enable autoAddReplicas
+    m = makeMap(
+        "action", CollectionParams.CollectionAction.CLUSTERPROP.toLower(),
+        "name", ZkStateReader.AUTO_ADD_REPLICAS);
+
+    request = new QueryRequest(new MapSolrParams(m));
+    request.setPath("/admin/collections");
+    cloudClient.request(request);
+
     assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLive(cloudClient.getZkStateReader(), collection1, 60000));
-    
     assertSliceAndReplicaCount(collection1);
   }
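
The new block above toggles the autoAddReplicas cluster property through the Collections API CLUSTERPROP action. A minimal sketch of the same call, assuming a reachable cluster and a hypothetical ZooKeeper address; dropping the "val" parameter unsets the property again, as in the re-enable step of the test:

    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.QueryRequest;
    import org.apache.solr.common.cloud.ZkStateReader;
    import org.apache.solr.common.params.CollectionParams;
    import org.apache.solr.common.params.MapSolrParams;

    import java.util.HashMap;
    import java.util.Map;

    public class ClusterPropSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical ZooKeeper connect string.
        CloudSolrClient cloudClient = new CloudSolrClient("localhost:2181");

        Map<String,String> m = new HashMap<>();
        m.put("action", CollectionParams.CollectionAction.CLUSTERPROP.toLower());
        m.put("name", ZkStateReader.AUTO_ADD_REPLICAS);
        m.put("val", "false");                 // omit "val" entirely to clear the property

        SolrRequest request = new QueryRequest(new MapSolrParams(m));
        request.setPath("/admin/collections");
        cloudClient.request(request);

        cloudClient.shutdown();
      }
    }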
 

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java Sun Jan  4 14:53:12 2015
@@ -17,13 +17,10 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 
 public class SimpleCollectionCreateDeleteTest extends AbstractFullDistribZkTestBase {

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java Sun Jan  4 14:53:12 2015
@@ -119,7 +119,7 @@ public class SolrXmlInZkTest extends Sol
     try {
       setUpZkAndDiskXml(true, true);
       assertEquals("Should have gotten a new port the xml file sent to ZK, overrides the copy on disk",
-          cfg.getZkHostPort(), "9045");
+          cfg.getSolrHostPort(), "9045");
     } finally {
       closeZK();
     }
@@ -130,7 +130,7 @@ public class SolrXmlInZkTest extends Sol
     try {
       setUpZkAndDiskXml(true, false);
       assertEquals("Should have gotten a new port the xml file sent to ZK",
-          cfg.getZkHostPort(), "9045");
+          cfg.getSolrHostPort(), "9045");
     } finally {
       closeZK();
     }
@@ -169,7 +169,7 @@ public class SolrXmlInZkTest extends Sol
     try {
       System.clearProperty("solr.solrxml.location");
       setUpZkAndDiskXml(false, true);
-      assertEquals("Should have gotten the default port", cfg.getZkHostPort(), "8983");
+      assertEquals("Should have gotten the default port", cfg.getSolrHostPort(), "8983");
     } finally {
       closeZK();
     }

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java Sun Jan  4 14:53:12 2015
@@ -17,18 +17,11 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.common.SolrInputDocument;
@@ -44,6 +37,13 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
 /**
  * Test sync phase that occurs when Leader goes down and a new Leader is
  * elected.
@@ -128,16 +128,16 @@ public class SyncSliceTest extends Abstr
     SolrRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
     
-    String baseUrl = ((HttpSolrServer) shardToJetty.get("shard1").get(2).client.solrClient)
+    String baseUrl = ((HttpSolrClient) shardToJetty.get("shard1").get(2).client.solrClient)
         .getBaseURL();
     baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
     
-    HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
+    HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
     // we only set the connection timeout, not the socket timeout
-    baseServer.setConnectionTimeout(30000);
-    baseServer.request(request);
-    baseServer.shutdown();
-    baseServer = null;
+    baseClient.setConnectionTimeout(30000);
+    baseClient.request(request);
+    baseClient.shutdown();
+    baseClient = null;
     
     waitForThingsToLevelOut(15);
     

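For readers tracking the SolrJ rename, here is a minimal sketch (not part of this patch) of the HttpSolrClient call pattern the SyncSliceTest hunk above migrates to; the base URL and the admin parameters below are placeholders, not values from the commit.

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class HttpSolrClientUsageSketch {
  public static void main(String[] args) throws Exception {
    // Build a Collections API request, as the test does.
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", "CLUSTERSTATUS");
    SolrRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");

    // HttpSolrServer becomes HttpSolrClient; the surrounding call pattern is unchanged.
    HttpSolrClient baseClient = new HttpSolrClient("http://127.0.0.1:8983/solr"); // placeholder URL
    try {
      baseClient.setConnectionTimeout(30000); // connection timeout only; socket timeout is left at its default
      baseClient.request(request);
    } finally {
      baseClient.shutdown();
    }
  }
}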
Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java Sun Jan  4 14:53:12 2015
@@ -21,7 +21,7 @@ package org.apache.solr.cloud;
 import com.google.common.collect.Lists;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
@@ -64,7 +64,7 @@ public class TestCollectionAPI extends R
 
   @Override
   public void doTest() throws Exception {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       createCollection(null, COLLECTION_NAME, 2, 2, 2, client, null, "conf1");
       createCollection(null, COLLECTION_NAME1, 1, 1, 1, client, null, "conf1");
@@ -89,7 +89,7 @@ public class TestCollectionAPI extends R
   }
 
   private void clusterStatusWithCollectionAndShard() throws IOException, SolrServerException {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
@@ -119,7 +119,7 @@ public class TestCollectionAPI extends R
 
 
   private void listCollection() throws IOException, SolrServerException {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.LIST.toString());
@@ -141,7 +141,7 @@ public class TestCollectionAPI extends R
   }
 
   private void clusterStatusNoCollection() throws Exception {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
@@ -167,7 +167,7 @@ public class TestCollectionAPI extends R
   }
 
   private void clusterStatusWithCollection() throws IOException, SolrServerException {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
@@ -189,7 +189,7 @@ public class TestCollectionAPI extends R
   }
 
   private void clusterStatusWithRouteKey() throws IOException, SolrServerException {
-    CloudSolrServer client = createCloudClient(DEFAULT_COLLECTION);
+    CloudSolrClient client = createCloudClient(DEFAULT_COLLECTION);
     try {
       SolrInputDocument doc = new SolrInputDocument();
       doc.addField("id", "a!123"); // goes to shard2. see ShardRoutingTest for details
@@ -222,7 +222,7 @@ public class TestCollectionAPI extends R
   }
 
   private void clusterStatusAliasTest() throws Exception  {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.CREATEALIAS.toString());
@@ -259,7 +259,7 @@ public class TestCollectionAPI extends R
   }
 
   private void clusterStatusRolesTest() throws Exception  {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       client.connect();
       Replica replica = client.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
@@ -293,7 +293,7 @@ public class TestCollectionAPI extends R
   }
 
   private void replicaPropTest() throws Exception {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       client.connect();
       Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
@@ -577,7 +577,7 @@ public class TestCollectionAPI extends R
 
 
   // Expects the map will have keys, but blank values.
-  private Map<String, String> getProps(CloudSolrServer client, String collectionName, String replicaName, String... props)
+  private Map<String, String> getProps(CloudSolrClient client, String collectionName, String replicaName, String... props)
       throws KeeperException, InterruptedException {
 
     client.getZkStateReader().updateClusterState(true);
@@ -592,7 +592,7 @@ public class TestCollectionAPI extends R
     }
     return propMap;
   }
-  private void missingParamsError(CloudSolrServer client, ModifiableSolrParams origParams)
+  private void missingParamsError(CloudSolrClient client, ModifiableSolrParams origParams)
       throws IOException, SolrServerException {
 
     SolrRequest request;

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java Sun Jan  4 14:53:12 2015
@@ -17,13 +17,11 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import org.apache.solr.client.solrj.SolrServer;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.util.StrUtils;
 import org.junit.BeforeClass;
 
@@ -116,9 +114,9 @@ public class TestDistribDocBasedVersion
     log.info("### STARTING doTestHardFail");
 
     // use a leader so we test both forwarding and non-forwarding logic
-    ss = shardToLeaderJetty.get(bucket1).client.solrClient;
+    solrClient = shardToLeaderJetty.get(bucket1).client.solrClient;
 
-    // ss = cloudClient;   CloudSolrServer doesn't currently support propagating error codes
+    // solrClient = cloudClient;   CloudSolrClient doesn't currently support propagating error codes
 
     doTestHardFail("p!doc1");
     doTestHardFail("q!doc1");
@@ -139,7 +137,7 @@ public class TestDistribDocBasedVersion
     log.info("### STARTING doTestDocVersions");
     assertEquals(2, cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION).getSlices().size());
 
-    ss = cloudClient;
+    solrClient = cloudClient;
 
     vadd("b!doc1", 10);
     vadd("c!doc2", 11);
@@ -183,7 +181,7 @@ public class TestDistribDocBasedVersion
     // now test with a non-smart client
     //
     // use a leader so we test both forwarding and non-forwarding logic
-    ss = shardToLeaderJetty.get(bucket1).client.solrClient;
+    solrClient = shardToLeaderJetty.get(bucket1).client.solrClient;
 
     vadd("b!doc5", 10);
     vadd("c!doc6", 11);
@@ -237,7 +235,7 @@ public class TestDistribDocBasedVersion
 
   }
 
-  SolrServer ss;
+  SolrClient solrClient;
 
   void vdelete(String id, long version, String... params) throws Exception {
     UpdateRequest req = new UpdateRequest();
@@ -246,7 +244,7 @@ public class TestDistribDocBasedVersion
     for (int i=0; i<params.length; i+=2) {
       req.setParam( params[i], params[i+1]);
     }
-    ss.request(req);
+    solrClient.request(req);
     // req.process(cloudClient);
   }
 
@@ -256,7 +254,7 @@ public class TestDistribDocBasedVersion
     for (int i=0; i<params.length; i+=2) {
       req.setParam( params[i], params[i+1]);
     }
-    ss.request(req);
+    solrClient.request(req);
   }
 
   void vaddFail(String id, long version, int errCode, String... params) throws Exception {
@@ -315,7 +313,7 @@ public class TestDistribDocBasedVersion
       expectedIds.put(strs.get(i), Long.valueOf(verS.get(i)));
     }
 
-    ss.query(params("qt","/get", "ids",ids));
+    solrClient.query(params("qt", "/get", "ids", ids));
 
     QueryResponse rsp = cloudClient.query(params("qt","/get", "ids",ids));
     Map<String, Object> obtainedIds = new HashMap<>();
@@ -327,7 +325,7 @@ public class TestDistribDocBasedVersion
   }
 
   void doRTG(String ids) throws Exception {
-    ss.query(params("qt","/get", "ids",ids));
+    solrClient.query(params("qt", "/get", "ids", ids));
 
     Set<String> expectedIds = new HashSet<>( StrUtils.splitSmart(ids, ",", true) );
 

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java Sun Jan  4 14:53:12 2015
@@ -17,18 +17,13 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import java.io.File;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
+import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrInputDocument;
@@ -37,10 +32,7 @@ import org.apache.solr.common.cloud.Repl
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.CoreDescriptor;
 import org.apache.solr.util.RevertDefaultThreadHandlerRule;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -52,7 +44,11 @@ import org.junit.rules.TestRule;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
+import java.io.File;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 /**
  * Test of the MiniSolrCloudCluster functionality. Keep in mind, 
@@ -89,14 +85,6 @@ public class TestMiniSolrCloudCluster ex
       miniCluster.shutdown();
     }
     miniCluster = null;
-    System.clearProperty("solr.tests.mergePolicy");
-    System.clearProperty("solr.tests.maxBufferedDocs");
-    System.clearProperty("solr.tests.maxIndexingThreads");
-    System.clearProperty("solr.tests.ramBufferSizeMB");
-    System.clearProperty("solr.tests.mergeScheduler");
-    System.clearProperty("solr.directoryFactory");
-    System.clearProperty("solr.solrxml.location");
-    System.clearProperty("zkHost");
   }
 
   @Test
@@ -118,33 +106,38 @@ public class TestMiniSolrCloudCluster ex
     assertTrue(startedServer.isRunning());
     assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
 
-    CloudSolrServer cloudSolrServer = null;
-    SolrZkClient zkClient = null;
-    try {
-      cloudSolrServer = new CloudSolrServer(miniCluster.getZkServer().getZkAddress(), true);
-      cloudSolrServer.connect();
-      zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(),
-        AbstractZkTestCase.TIMEOUT, 45000, null);
-
-      // create collection
-      String collectionName = "testSolrCloudCollection";
-      String configName = "solrCloudCollectionConfig";
-      System.setProperty("solr.tests.mergePolicy", "org.apache.lucene.index.TieredMergePolicy");
-      uploadConfigToZk(SolrTestCaseJ4.TEST_HOME() + File.separator + "collection1" + File.separator + "conf", configName);
-      createCollection(cloudSolrServer, collectionName, NUM_SHARDS, REPLICATION_FACTOR, configName);
-
+    // create collection
+    String collectionName = "testSolrCloudCollection";
+    String configName = "solrCloudCollectionConfig";
+    File configDir = new File(SolrTestCaseJ4.TEST_HOME() + File.separator + "collection1" + File.separator + "conf");
+    miniCluster.uploadConfigDir(configDir, configName);
+    
+    Map<String, String> collectionProperties = new HashMap<>();
+    collectionProperties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
+    collectionProperties.put("solr.tests.maxBufferedDocs", "100000");
+    collectionProperties.put("solr.tests.maxIndexingThreads", "-1");
+    collectionProperties.put("solr.tests.ramBufferSizeMB", "100");
+    // use non-test classes so RandomizedRunner isn't necessary
+    collectionProperties.put("solr.tests.mergePolicy", "org.apache.lucene.index.TieredMergePolicy");
+    collectionProperties.put("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
+    collectionProperties.put("solr.directoryFactory", "solr.RAMDirectoryFactory");
+    miniCluster.createCollection(collectionName, NUM_SHARDS, REPLICATION_FACTOR, configName, collectionProperties);
+    
+    try(SolrZkClient zkClient = new SolrZkClient
+        (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, 45000, null)) {
+      ZkStateReader zkStateReader = new ZkStateReader(zkClient);
+      waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
+      
       // modify/query collection
-      cloudSolrServer.setDefaultCollection(collectionName);
+      CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
+      cloudSolrClient.setDefaultCollection(collectionName);
       SolrInputDocument doc = new SolrInputDocument();
       doc.setField("id", "1");
-
-      ZkStateReader zkStateReader = new ZkStateReader(zkClient);
-      waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
-      cloudSolrServer.add(doc);
-      cloudSolrServer.commit();
+      cloudSolrClient.add(doc);
+      cloudSolrClient.commit();
       SolrQuery query = new SolrQuery();
       query.setQuery("*:*");
-      QueryResponse rsp = cloudSolrServer.query(query);
+      QueryResponse rsp = cloudSolrClient.query(query);
       assertEquals(1, rsp.getResults().getNumFound());
 
       // remove a server not hosting any replicas
@@ -172,64 +165,9 @@ public class TestMiniSolrCloudCluster ex
           assertEquals(NUM_SERVERS - 1, miniCluster.getJettySolrRunners().size());
         }
       }
-    } finally {
-      if (cloudSolrServer != null) {
-        cloudSolrServer.shutdown();
-      }
-      if (zkClient != null) {
-        zkClient.close();
-      }
     }
   }
 
-  protected void uploadConfigToZk(String configDir, String configName) throws Exception {
-    // override settings in the solrconfig include
-    System.setProperty("solr.tests.maxBufferedDocs", "100000");
-    System.setProperty("solr.tests.maxIndexingThreads", "-1");
-    System.setProperty("solr.tests.ramBufferSizeMB", "100");
-    // use non-test classes so RandomizedRunner isn't necessary
-    System.setProperty("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
-    System.setProperty("solr.directoryFactory", "solr.RAMDirectoryFactory");
-
-    SolrZkClient zkClient = null;
-    try {
-      zkClient =  new SolrZkClient(miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, 45000, null);
-      uploadConfigFileToZk(zkClient, configName, "solrconfig.xml", new File(configDir, "solrconfig-tlog.xml"));
-      uploadConfigFileToZk(zkClient, configName, "schema.xml", new File(configDir, "schema.xml"));
-      uploadConfigFileToZk(zkClient, configName, "solrconfig.snippet.randomindexconfig.xml",
-        new File(configDir, "solrconfig.snippet.randomindexconfig.xml"));
-      uploadConfigFileToZk(zkClient, configName, "currency.xml", new File(configDir, "currency.xml"));
-      uploadConfigFileToZk(zkClient, configName, "mapping-ISOLatin1Accent.txt",
-        new File(configDir, "mapping-ISOLatin1Accent.txt"));
-      uploadConfigFileToZk(zkClient, configName, "old_synonyms.txt", new File(configDir, "old_synonyms.txt"));
-      uploadConfigFileToZk(zkClient, configName, "open-exchange-rates.json",
-        new File(configDir, "open-exchange-rates.json"));
-      uploadConfigFileToZk(zkClient, configName, "protwords.txt", new File(configDir, "protwords.txt"));
-      uploadConfigFileToZk(zkClient, configName, "stopwords.txt", new File(configDir, "stopwords.txt"));
-      uploadConfigFileToZk(zkClient, configName, "synonyms.txt", new File(configDir, "synonyms.txt"));
-    } finally {
-      if (zkClient != null) zkClient.close();
-    }
-  }
-
-  protected void uploadConfigFileToZk(SolrZkClient zkClient, String configName, String nameInZk, File file)
-      throws Exception {
-    zkClient.makePath(ZkController.CONFIGS_ZKNODE + "/" + configName + "/" + nameInZk, file, false, true);
-  }
-
-  protected NamedList<Object> createCollection(CloudSolrServer server, String name, int numShards,
-      int replicationFactor, String configName) throws Exception {
-    ModifiableSolrParams modParams = new ModifiableSolrParams();
-    modParams.set(CoreAdminParams.ACTION, CollectionAction.CREATE.name());
-    modParams.set("name", name);
-    modParams.set("numShards", numShards);
-    modParams.set("replicationFactor", replicationFactor);
-    modParams.set("collection.configName", configName);
-    QueryRequest request = new QueryRequest(modParams);
-    request.setPath("/admin/collections");
-    return server.request(request);
-  }
-
   protected void waitForRecoveriesToFinish(String collection,
       ZkStateReader zkStateReader, boolean verbose, boolean failOnTimeout, int timeoutSeconds)
       throws Exception {

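Likewise, a minimal sketch (not part of this patch) of the MiniSolrCloudCluster helpers the TestMiniSolrCloudCluster hunks above switch to (uploadConfigDir, createCollection, getSolrClient). It assumes an already-started cluster; the config path, collection name, shard/replica counts, and property values are placeholders.

import java.io.File;
import java.util.HashMap;
import java.util.Map;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.cloud.MiniSolrCloudCluster;
import org.apache.solr.common.SolrInputDocument;

public class MiniSolrCloudClusterUsageSketch {
  // Assumes an already-started MiniSolrCloudCluster; cluster construction is omitted here.
  static void createAndQuery(MiniSolrCloudCluster miniCluster) throws Exception {
    // Upload a config set and create a collection through the cluster helpers,
    // replacing hand-rolled SolrZkClient uploads and a manual /admin/collections request.
    File configDir = new File("/path/to/collection1/conf"); // placeholder path
    miniCluster.uploadConfigDir(configDir, "myConfig");

    Map<String, String> collectionProperties = new HashMap<>();
    collectionProperties.put("solr.directoryFactory", "solr.RAMDirectoryFactory");
    miniCluster.createCollection("myCollection", 2, 1, "myConfig", collectionProperties);

    // The cluster now hands out a shared CloudSolrClient (formerly CloudSolrServer).
    CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
    cloudSolrClient.setDefaultCollection("myCollection");

    SolrInputDocument doc = new SolrInputDocument();
    doc.setField("id", "1");
    cloudSolrClient.add(doc);
    cloudSolrClient.commit();
    System.out.println(cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
  }
}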
Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestModifyConfFiles.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestModifyConfFiles.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestModifyConfFiles.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestModifyConfFiles.java Sun Jan  4 14:53:12 2015
@@ -18,7 +18,7 @@ package org.apache.solr.cloud;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.params.ModifiableSolrParams;
@@ -28,6 +28,8 @@ import org.apache.solr.common.util.Simpl
 import java.io.File;
 import java.nio.charset.StandardCharsets;
 
+import static org.junit.internal.matchers.StringContains.containsString;
+
 public class TestModifyConfFiles extends AbstractFullDistribZkTestBase {
 
   public TestModifyConfFiles() {
@@ -37,7 +39,7 @@ public class TestModifyConfFiles extends
   @Override
   public void doTest() throws Exception {
     int which = r.nextInt(clients.size());
-    HttpSolrServer client = (HttpSolrServer) clients.get(which);
+    HttpSolrClient client = (HttpSolrClient) clients.get(which);
 
     ModifiableSolrParams params = new ModifiableSolrParams();
     params.set("op", "write");
@@ -48,7 +50,7 @@ public class TestModifyConfFiles extends
       client.request(request);
       fail("Should have caught exception");
     } catch (Exception e) {
-      assertEquals(e.getMessage(), "Input stream list was null for admin file write operation.");
+      assertThat(e.getMessage(), containsString("Input stream list was null for admin file write operation."));
     }
 
     params.remove("file");
@@ -60,7 +62,7 @@ public class TestModifyConfFiles extends
       client.request(request);
       fail("Should have caught exception");
     } catch (Exception e) {
-      assertEquals(e.getMessage(), "No file name specified for write operation.");
+      assertThat(e.getMessage(), containsString("No file name specified for write operation."));
     }
 
     params.set("op", "write");
@@ -71,7 +73,7 @@ public class TestModifyConfFiles extends
       client.request(request);
       fail("Should have caught exception");
     } catch (Exception e) {
-      assertEquals(e.getMessage(), "Can not access: bogus.txt");
+      assertThat(e.getMessage(), containsString("Can not access: bogus.txt"));
     }
 
     try {

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java Sun Jan  4 14:53:12 2015
@@ -26,7 +26,7 @@ import java.util.Map;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
@@ -58,7 +58,7 @@ public class TestReplicaProperties exten
 
   @Override
   public void doTest() throws Exception {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       // Mix up a bunch of different combinations of shards and replicas in order to exercise boundary cases.
       // shards, replicationfactor, maxreplicaspernode
@@ -81,7 +81,7 @@ public class TestReplicaProperties exten
   }
 
   private void listCollection() throws IOException, SolrServerException {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.LIST.toString());
@@ -101,7 +101,7 @@ public class TestReplicaProperties exten
 
 
   private void clusterAssignPropertyTest() throws Exception {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       client.connect();
       try {
@@ -193,7 +193,7 @@ public class TestReplicaProperties exten
       // leaders _also_ have the preferredLeader property set.
 
 
-      doPropertyAction(client,
+      NamedList<Object> res = doPropertyAction(client,
           "action", CollectionParams.CollectionAction.REBALANCELEADERS.toString(),
           "collection", COLLECTION_NAME);
 
@@ -204,7 +204,7 @@ public class TestReplicaProperties exten
     }
   }
 
-  private void verifyLeaderAssignment(CloudSolrServer client, String collectionName)
+  private void verifyLeaderAssignment(CloudSolrClient client, String collectionName)
       throws InterruptedException, KeeperException {
     String lastFailMsg = "";
     for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
@@ -239,7 +239,7 @@ public class TestReplicaProperties exten
     fail(lastFailMsg);
   }
 
-  private void addProperty(CloudSolrServer client, String... paramsIn) throws IOException, SolrServerException {
+  private void addProperty(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
     assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
     ModifiableSolrParams params = new ModifiableSolrParams();
     for (int idx = 0; idx < paramsIn.length; idx += 2) {

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java Sun Jan  4 14:53:12 2015
@@ -19,7 +19,7 @@ package org.apache.solr.cloud;
 
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
@@ -214,12 +214,12 @@ public class TestRequestStatusCollection
     SolrRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
 
-    String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
+    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
         .getBaseURL();
     baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
 
-    HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
-    baseServer.setConnectionTimeout(15000);
-    return baseServer.request(request);
+    HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
+    baseClient.setConnectionTimeout(15000);
+    return baseClient.request(request);
   }
 }

Modified: lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestShortCircuitedRequests.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestShortCircuitedRequests.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestShortCircuitedRequests.java (original)
+++ lucene/dev/branches/lucene6005/solr/core/src/test/org/apache/solr/cloud/TestShortCircuitedRequests.java Sun Jan  4 14:53:12 2015
@@ -18,7 +18,7 @@ package org.apache.solr.cloud;
  */
 
 import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.params.ShardParams;
@@ -48,7 +48,7 @@ public class TestShortCircuitedRequests
     // query shard3 directly with _route_=a! so that we trigger the short circuited request path
     Replica shard3 = cloudClient.getZkStateReader().getClusterState().getLeader(DEFAULT_COLLECTION, "shard3");
     String nodeName = shard3.getNodeName();
-    SolrServer shard3Client = getClient(nodeName);
+    SolrClient shard3Client = getClient(nodeName);
     QueryResponse response = shard3Client.query(new SolrQuery("*:*").add(ShardParams._ROUTE_, "a!").add(ShardParams.SHARDS_INFO, "true"));
 
     assertEquals("Could not find doc", 1, response.getResults().getNumFound());