Posted to commits@lucene.apache.org by ro...@apache.org on 2014/12/31 15:05:50 UTC

svn commit: r1648697 [4/13] - in /lucene/dev/trunk/solr: ./ contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/ contrib/map-reduce/src/java/org/apache/solr/hadoop...

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/MultiThreadedOCPTest.java Wed Dec 31 14:05:48 2014
@@ -18,9 +18,9 @@ package org.apache.solr.cloud;
  */
 
 import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest.RequestStatus;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest.SplitShard;
@@ -76,7 +76,7 @@ public class MultiThreadedOCPTest extend
   }
 
   private void testParallelCollectionAPICalls() throws IOException, SolrServerException {
-    SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
+    SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
 
     for(int i = 1 ; i <= NUM_COLLECTIONS ; i++) {
       Create createCollectionRequest = new Create();
@@ -84,7 +84,7 @@ public class MultiThreadedOCPTest extend
       createCollectionRequest.setNumShards(4);
       createCollectionRequest.setConfigName("conf1");
       createCollectionRequest.setAsyncId(String.valueOf(i));
-      createCollectionRequest.process(server);
+      createCollectionRequest.process(client);
     }
 
     boolean pass = false;
@@ -92,7 +92,7 @@ public class MultiThreadedOCPTest extend
     while(true) {
       int numRunningTasks = 0;
       for (int i = 1; i <= NUM_COLLECTIONS; i++)
-        if (getRequestState(i + "", server).equals("running"))
+        if (getRequestState(i + "", client).equals("running"))
           numRunningTasks++;
       if(numRunningTasks > 1) {
         pass = true;
@@ -107,38 +107,38 @@ public class MultiThreadedOCPTest extend
     }
     assertTrue("More than one tasks were supposed to be running in parallel but they weren't.", pass);
     for(int i=1;i<=NUM_COLLECTIONS;i++) {
-      String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
+      String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, client);
       assertTrue("Task " + i + " did not complete, final state: " + state,state.equals("completed"));
     }
   }
 
   private void testTaskExclusivity() throws IOException, SolrServerException {
-    SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
+    SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
     Create createCollectionRequest = new Create();
     createCollectionRequest.setCollectionName("ocptest_shardsplit");
     createCollectionRequest.setNumShards(4);
     createCollectionRequest.setConfigName("conf1");
     createCollectionRequest.setAsyncId("1000");
-    createCollectionRequest.process(server);
+    createCollectionRequest.process(client);
 
     SplitShard splitShardRequest = new SplitShard();
     splitShardRequest.setCollectionName("ocptest_shardsplit");
     splitShardRequest.setShardName(SHARD1);
     splitShardRequest.setAsyncId("1001");
-    splitShardRequest.process(server);
+    splitShardRequest.process(client);
 
     splitShardRequest = new SplitShard();
     splitShardRequest.setCollectionName("ocptest_shardsplit");
     splitShardRequest.setShardName(SHARD2);
     splitShardRequest.setAsyncId("1002");
-    splitShardRequest.process(server);
+    splitShardRequest.process(client);
 
     int iterations = 0;
     while(true) {
       int runningTasks = 0;
       int completedTasks = 0;
       for (int i=1001;i<=1002;i++) {
-        String state = getRequestState(i, server);
+        String state = getRequestState(i, client);
         if (state.equals("running"))
           runningTasks++;
         if (state.equals("completed"))
@@ -161,45 +161,45 @@ public class MultiThreadedOCPTest extend
       }
     }
     for (int i=1001;i<=1002;i++) {
-      String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
+      String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, client);
       assertTrue("Task " + i + " did not complete, final state: " + state,state.equals("completed"));
     }
   }
 
   private void testDeduplicationOfSubmittedTasks() throws IOException, SolrServerException {
-    SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
+    SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
     Create createCollectionRequest = new Create();
     createCollectionRequest.setCollectionName("ocptest_shardsplit2");
     createCollectionRequest.setNumShards(4);
     createCollectionRequest.setConfigName("conf1");
     createCollectionRequest.setAsyncId("3000");
-    createCollectionRequest.process(server);
+    createCollectionRequest.process(client);
 
     SplitShard splitShardRequest = new SplitShard();
     splitShardRequest.setCollectionName("ocptest_shardsplit2");
     splitShardRequest.setShardName(SHARD1);
     splitShardRequest.setAsyncId("3001");
-    splitShardRequest.process(server);
+    splitShardRequest.process(client);
 
     splitShardRequest = new SplitShard();
     splitShardRequest.setCollectionName("ocptest_shardsplit2");
     splitShardRequest.setShardName(SHARD2);
     splitShardRequest.setAsyncId("3002");
-    splitShardRequest.process(server);
+    splitShardRequest.process(client);
 
     // Now submit another task with the same id. At this time, hopefully the previous 3002 should still be in the queue.
     splitShardRequest = new SplitShard();
     splitShardRequest.setCollectionName("ocptest_shardsplit2");
     splitShardRequest.setShardName(SHARD1);
     splitShardRequest.setAsyncId("3002");
-    CollectionAdminResponse response = splitShardRequest.process(server);
+    CollectionAdminResponse response = splitShardRequest.process(client);
 
     NamedList r = response.getResponse();
     assertEquals("Duplicate request was supposed to exist but wasn't found. De-duplication of submitted task failed.",
         "Task with the same requestid already exists.", r.get("error"));
 
     for (int i=3001;i<=3002;i++) {
-      String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
+      String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, client);
       assertTrue("Task " + i + " did not complete, final state: " + state,state.equals("completed"));
     }
   }
@@ -224,16 +224,16 @@ public class MultiThreadedOCPTest extend
 
     try {
 
-      SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
+      SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
       SplitShard splitShardRequest = new SplitShard();
       splitShardRequest.setCollectionName("collection1");
       splitShardRequest.setShardName(SHARD1);
       splitShardRequest.setAsyncId("2000");
-      splitShardRequest.process(server);
+      splitShardRequest.process(client);
 
-      String state = getRequestState("2000", server);
+      String state = getRequestState("2000", client);
       while (state.equals("submitted")) {
-        state = getRequestState("2000", server);
+        state = getRequestState("2000", client);
         Thread.sleep(10);
       }
       assertTrue("SplitShard task [2000] was supposed to be in [running] but isn't. It is [" + state + "]", state.equals("running"));
@@ -246,9 +246,9 @@ public class MultiThreadedOCPTest extend
       SolrRequest request = new QueryRequest(params);
       request.setPath("/admin/collections");
 
-      server.request(request);
+      client.request(request);
 
-      state = getRequestState("2000", server);
+      state = getRequestState("2000", client);
 
       assertTrue("After invoking OVERSEERSTATUS, SplitShard task [2000] was still supposed to be in [running] but isn't." +
           "It is [" + state + "]", state.equals("running"));
@@ -267,13 +267,13 @@ public class MultiThreadedOCPTest extend
     // todo - target diff servers and use cloud clients as well as non-cloud clients
   }
 
-  private String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrServer server)
+  private String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrClient client)
       throws IOException, SolrServerException {
     String state = null;
     long maxWait = System.nanoTime() + TimeUnit.NANOSECONDS.convert(waitForSeconds, TimeUnit.SECONDS);
 
     while (System.nanoTime() < maxWait)  {
-      state = getRequestState(requestId, server);
+      state = getRequestState(requestId, client);
       if(state.equals("completed") || state.equals("failed"))
         return state;
       try {
@@ -285,14 +285,14 @@ public class MultiThreadedOCPTest extend
     return state;
   }
 
-  private String getRequestState(int requestId, SolrServer server) throws IOException, SolrServerException {
-    return getRequestState(String.valueOf(requestId), server);
+  private String getRequestState(int requestId, SolrClient client) throws IOException, SolrServerException {
+    return getRequestState(String.valueOf(requestId), client);
   }
 
-  private String getRequestState(String requestId, SolrServer server) throws IOException, SolrServerException {
+  private String getRequestState(String requestId, SolrClient client) throws IOException, SolrServerException {
     RequestStatus requestStatusRequest = new RequestStatus();
     requestStatusRequest.setRequestId(requestId);
-    CollectionAdminResponse response = requestStatusRequest.process(server);
+    CollectionAdminResponse response = requestStatusRequest.process(client);
 
     NamedList innerResponse = (NamedList) response.getResponse().get("status");
     return (String) innerResponse.get("state");
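
The MultiThreadedOCPTest changes above swap SolrServer/HttpSolrServer for the renamed SolrClient/HttpSolrClient while keeping the async Collection API pattern intact: submit a request with an async id, then poll REQUESTSTATUS until the task finishes. A minimal standalone sketch of that pattern, using only SolrJ calls that appear in this diff (the base URL, collection name, and async id below are placeholders, not values from the commit):

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.RequestStatus;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.common.util.NamedList;

public class AsyncCollectionApiSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder base URL of any node in the cluster.
    SolrClient client = new HttpSolrClient("http://localhost:8983/solr");
    try {
      // Submit the collection creation asynchronously, as the test does.
      Create create = new Create();
      create.setCollectionName("ocptest_example");   // placeholder collection name
      create.setNumShards(4);
      create.setConfigName("conf1");
      create.setAsyncId("42");                       // caller-chosen request id
      create.process(client);

      // Poll REQUESTSTATUS until the task is no longer submitted/running.
      String state;
      do {
        Thread.sleep(500);
        RequestStatus status = new RequestStatus();
        status.setRequestId("42");
        CollectionAdminResponse rsp = status.process(client);
        state = (String) ((NamedList) rsp.getResponse().get("status")).get("state");
      } while (state.equals("submitted") || state.equals("running"));

      System.out.println("Task 42 finished with state: " + state);
    } finally {
      client.shutdown();   // shutdown() is still the SolrClient teardown call at this revision
    }
  }
}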

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java Wed Dec 31 14:05:48 2014
@@ -36,7 +36,7 @@ import org.apache.lucene.util.LuceneTest
 import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.cloud.overseer.OverseerAction;
 import org.apache.solr.common.cloud.SolrZkClient;
@@ -53,7 +53,7 @@ import org.junit.BeforeClass;
 @LuceneTestCase.Slow
 @SuppressSSL     // See SOLR-5776
 public class OverseerRolesTest  extends AbstractFullDistribZkTestBase{
-  private CloudSolrServer client;
+  private CloudSolrClient client;
 
   @BeforeClass
   public static void beforeThisClass2() throws Exception {
@@ -228,10 +228,10 @@ public class OverseerRolesTest  extends
   }
 
 
-  protected void createCollection(String COLL_NAME, CloudSolrServer client) throws Exception {
+  protected void createCollection(String COLL_NAME, CloudSolrClient client) throws Exception {
     int replicationFactor = 2;
     int numShards = 4;
-    int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrServer()
+    int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
         .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
 
     Map<String, Object> props = makeMap(

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java Wed Dec 31 14:05:48 2014
@@ -19,7 +19,7 @@ package org.apache.solr.cloud;
 
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.common.cloud.DocRouter;

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java Wed Dec 31 14:05:48 2014
@@ -18,7 +18,7 @@ package org.apache.solr.cloud;
  */
 
 import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
 
@@ -56,17 +56,17 @@ public class RemoteQueryErrorTest extend
     checkForCollection("collection2", numShardsNumReplicaList, null);
     waitForRecoveriesToFinish("collection2", true);
 
-    for (SolrServer solrServer : clients) {
+    for (SolrClient solrClient : clients) {
       try {
         SolrInputDocument emptyDoc = new SolrInputDocument();
-        solrServer.add(emptyDoc);
+        solrClient.add(emptyDoc);
         fail("Expected unique key exceptoin");
       } catch (SolrException ex) {
         assertThat(ex.getMessage(), containsString("Document is missing mandatory uniqueKey field: id"));
       } catch(Exception ex) {
         fail("Expected a SolrException to occur, instead received: " + ex.getClass());
       } finally {
-        solrServer.shutdown();
+        solrClient.shutdown();
       }
     }
   }

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java Wed Dec 31 14:05:48 2014
@@ -17,15 +17,9 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
 import org.apache.commons.lang.StringUtils;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
@@ -35,12 +29,18 @@ import org.apache.solr.common.params.Mod
 import org.apache.solr.common.util.NamedList;
 import org.apache.zookeeper.KeeperException;
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
 // Collect useful operations for testing assigning properties to individual replicas
 // Could probably expand this to do something creative with getting random slices
 // and shards, but for now this will do.
 public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBase {
 
-  public static NamedList<Object> doPropertyAction(CloudSolrServer client, String... paramsIn) throws IOException, SolrServerException {
+  public static NamedList<Object> doPropertyAction(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
     assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
     ModifiableSolrParams params = new ModifiableSolrParams();
     for (int idx = 0; idx < paramsIn.length; idx += 2) {
@@ -51,7 +51,7 @@ public abstract class ReplicaPropertiesB
     return client.request(request);
   }
 
-  public static void verifyPropertyNotPresent(CloudSolrServer client, String collectionName, String replicaName,
+  public static void verifyPropertyNotPresent(CloudSolrClient client, String collectionName, String replicaName,
                                 String property)
       throws KeeperException, InterruptedException {
     ClusterState clusterState = null;
@@ -76,7 +76,7 @@ public abstract class ReplicaPropertiesB
   // collection
   // shard
   // replica
-  public static void verifyPropertyVal(CloudSolrServer client, String collectionName,
+  public static void verifyPropertyVal(CloudSolrClient client, String collectionName,
                          String replicaName, String property, String val)
       throws InterruptedException, KeeperException {
     Replica replica = null;
@@ -102,16 +102,17 @@ public abstract class ReplicaPropertiesB
   // Verify that
   // 1> the property is only set once in all the replicas in a slice.
   // 2> the property is balanced evenly across all the nodes hosting collection
-  public static void verifyUniqueAcrossCollection(CloudSolrServer client, String collectionName,
+  public static void verifyUniqueAcrossCollection(CloudSolrClient client, String collectionName,
                                     String property) throws KeeperException, InterruptedException {
     verifyUnique(client, collectionName, property, true);
   }
 
-  public static void verifyUniquePropertyWithinCollection(CloudSolrServer client, String collectionName,
+  public static void verifyUniquePropertyWithinCollection(CloudSolrClient client, String collectionName,
                             String property) throws KeeperException, InterruptedException {
     verifyUnique(client, collectionName, property, false);
   }
-  public static void verifyUnique(CloudSolrServer client, String collectionName, String property, boolean balanced)
+
+  public static void verifyUnique(CloudSolrClient client, String collectionName, String property, boolean balanced)
       throws KeeperException, InterruptedException {
 
     DocCollection col = null;

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java Wed Dec 31 14:05:48 2014
@@ -31,7 +31,7 @@ import org.apache.lucene.util.LuceneTest
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.Replica;
@@ -205,11 +205,11 @@ public class ReplicationFactorTest exten
   
   @SuppressWarnings("rawtypes")
   protected void sendNonDirectUpdateRequestReplica(Replica replica, UpdateRequest up, int expectedRf, String collection) throws Exception {
-    HttpSolrServer solrServer = null;
+    HttpSolrClient solrServer = null;
     try {
       ZkCoreNodeProps zkProps = new ZkCoreNodeProps(replica);
       String url = zkProps.getBaseUrl() + "/" + collection;
-      solrServer = new HttpSolrServer(url);    
+      solrServer = new HttpSolrClient(url);
             
       NamedList resp = solrServer.request(up);
       NamedList hdr = (NamedList) resp.get("responseHeader");

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java Wed Dec 31 14:05:48 2014
@@ -29,7 +29,7 @@ import org.apache.commons.lang.StringUti
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.client.solrj.impl.LBHttpSolrServer;
+import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
@@ -115,7 +115,7 @@ public class SSLMigrationTest extends Ab
       urls.add(replica.getStr(ZkStateReader.BASE_URL_PROP));
     }
     //Create new SolrServer to configure new HttpClient w/ SSL config
-    new LBHttpSolrServer(urls.toArray(new String[]{})).request(request);
+    new LBHttpSolrClient(urls.toArray(new String[]{})).request(request);
   }
   
 }
\ No newline at end of file
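
The SSLMigrationTest hunk above is a one-line rename from LBHttpSolrServer to LBHttpSolrClient: the test still fans an admin request out over a set of node base URLs. A minimal sketch of that usage, assuming the SolrJ API shown in the diff (the URLs and the Collections API action are placeholders):

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

import java.util.Arrays;
import java.util.List;

public class LbAdminRequestSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder node base URLs; the test collects these from each replica's BASE_URL_PROP.
    List<String> urls = Arrays.asList("http://localhost:8983/solr",
                                      "http://localhost:7574/solr");

    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", "OVERSEERSTATUS");   // placeholder Collections API action
    SolrRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");

    // LBHttpSolrClient is constructed from the base URLs, exactly as in the diff.
    new LBHttpSolrClient(urls.toArray(new String[]{})).request(request);
  }
}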

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java Wed Dec 31 14:05:48 2014
@@ -18,10 +18,10 @@ package org.apache.solr.cloud;
  */
 
 import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
@@ -313,7 +313,7 @@ public class ShardRoutingTest extends Ab
     assertEquals(8, nClients);
 
     int expectedVal = 0;
-    for (SolrServer client : clients) {
+    for (SolrClient client : clients) {
       client.add(sdoc("id", "b!doc", "foo_i", map("inc",1)));
       expectedVal++;
 

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java Wed Dec 31 14:05:48 2014
@@ -18,12 +18,13 @@ package org.apache.solr.cloud;
  */
 
 import org.apache.http.params.CoreConnectionPNames;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServer;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
@@ -42,7 +43,6 @@ import org.junit.After;
 import org.junit.Before;
 
 import java.io.IOException;
-import java.net.MalformedURLException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -51,10 +51,9 @@ import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 
-import org.apache.lucene.util.LuceneTestCase.Slow;
 import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
 import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
 
 @Slow
 public class ShardSplitTest extends BasicDistributedZkTest {
@@ -126,7 +125,7 @@ public class ShardSplitTest extends Basi
     try {
       splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
       fail("Shard splitting with just one custom hash range should not succeed");
-    } catch (HttpSolrServer.RemoteSolrException e) {
+    } catch (HttpSolrClient.RemoteSolrException e) {
       log.info("Expected exception:", e);
     }
     subRanges.clear();
@@ -137,7 +136,7 @@ public class ShardSplitTest extends Basi
     try {
       splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
       fail("Shard splitting with missing hashes in between given ranges should not succeed");
-    } catch (HttpSolrServer.RemoteSolrException e) {
+    } catch (HttpSolrClient.RemoteSolrException e) {
       log.info("Expected exception:", e);
     }
     subRanges.clear();
@@ -150,7 +149,7 @@ public class ShardSplitTest extends Basi
     try {
       splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
       fail("Shard splitting with overlapping ranges should not succeed");
-    } catch (HttpSolrServer.RemoteSolrException e) {
+    } catch (HttpSolrClient.RemoteSolrException e) {
       log.info("Expected exception:", e);
     }
     subRanges.clear();
@@ -220,7 +219,7 @@ public class ShardSplitTest extends Basi
           log.info("Layout after split: \n");
           printLayout();
           break;
-        } catch (HttpSolrServer.RemoteSolrException e) {
+        } catch (HttpSolrClient.RemoteSolrException e) {
           if (e.code() != 500)  {
             throw e;
           }
@@ -248,11 +247,11 @@ public class ShardSplitTest extends Basi
     String collectionName = "routeFieldColl";
     int numShards = 4;
     int replicationFactor = 2;
-    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
+    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
         .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
 
     HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
-    CloudSolrServer client = null;
+    CloudSolrClient client = null;
     String shard_fld = "shard_s";
     try {
       client = createCloudClient(null);
@@ -272,9 +271,9 @@ public class ShardSplitTest extends Basi
 
     waitForRecoveriesToFinish(false);
 
-    String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
+    String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
 
-    HttpSolrServer collectionClient = new HttpSolrServer(url);
+    HttpSolrClient collectionClient = new HttpSolrClient(url);
 
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
     final DocRouter router = clusterState.getCollection(collectionName).getRouter();
@@ -304,7 +303,7 @@ public class ShardSplitTest extends Basi
       try {
         splitShard(collectionName, SHARD1, null, null);
         break;
-      } catch (HttpSolrServer.RemoteSolrException e) {
+      } catch (HttpSolrClient.RemoteSolrException e) {
         if (e.code() != 500) {
           throw e;
         }
@@ -327,11 +326,11 @@ public class ShardSplitTest extends Basi
     String collectionName = "splitByRouteKeyTest";
     int numShards = 4;
     int replicationFactor = 2;
-    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
+    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
         .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
 
     HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
-    CloudSolrServer client = null;
+    CloudSolrClient client = null;
     try {
       client = createCloudClient(null);
       Map<String, Object> props = ZkNodeProps.makeMap(
@@ -349,9 +348,9 @@ public class ShardSplitTest extends Basi
 
     waitForRecoveriesToFinish(false);
 
-    String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
+    String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
 
-    HttpSolrServer collectionClient = new HttpSolrServer(url);
+    HttpSolrClient collectionClient = new HttpSolrClient(url);
 
     String splitKey = "b!";
 
@@ -389,7 +388,7 @@ public class ShardSplitTest extends Basi
       try {
         splitShard(collectionName, null, null, splitKey);
         break;
-      } catch (HttpSolrServer.RemoteSolrException e) {
+      } catch (HttpSolrClient.RemoteSolrException e) {
         if (e.code() != 500) {
           throw e;
         }
@@ -447,23 +446,23 @@ public class ShardSplitTest extends Basi
     query.set("distrib", false);
 
     ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
-    HttpSolrServer shard1_0Server = new HttpSolrServer(shard1_0.getCoreUrl());
+    HttpSolrClient shard1_0Client = new HttpSolrClient(shard1_0.getCoreUrl());
     QueryResponse response;
     try {
-      response = shard1_0Server.query(query);
+      response = shard1_0Client.query(query);
     } finally {
-      shard1_0Server.shutdown();
+      shard1_0Client.shutdown();
     }
     long shard10Count = response.getResults().getNumFound();
 
     ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(
         AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
-    HttpSolrServer shard1_1Server = new HttpSolrServer(shard1_1.getCoreUrl());
+    HttpSolrClient shard1_1Client = new HttpSolrClient(shard1_1.getCoreUrl());
     QueryResponse response2;
     try {
-      response2 = shard1_1Server.query(query);
+      response2 = shard1_1Client.query(query);
     } finally {
-      shard1_1Server.shutdown();
+      shard1_1Client.shutdown();
     }
     long shard11Count = response2.getResults().getNumFound();
 
@@ -483,12 +482,12 @@ public class ShardSplitTest extends Basi
     int c = 0;
     for (Replica replica : slice.getReplicas()) {
       String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
-      HttpSolrServer server = new HttpSolrServer(coreUrl);
+      HttpSolrClient client = new HttpSolrClient(coreUrl);
       QueryResponse response;
       try {
-        response = server.query(query);
+        response = client.query(query);
       } finally {
-        server.shutdown();
+        client.shutdown();
       }
       numFound[c++] = response.getResults().getNumFound();
       log.info("Shard: " + shard + " Replica: {} has {} docs", coreUrl, String.valueOf(response.getResults().getNumFound()));
@@ -522,15 +521,15 @@ public class ShardSplitTest extends Basi
     SolrRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
 
-    String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
+    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
         .getBaseURL();
     baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
 
-    HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
-    baseServer.setConnectionTimeout(30000);
-    baseServer.setSoTimeout(60000 * 5);
-    baseServer.request(request);
-    baseServer.shutdown();
+    HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
+    baseClient.setConnectionTimeout(30000);
+    baseClient.setSoTimeout(60000 * 5);
+    baseClient.request(request);
+    baseClient.shutdown();
   }
 
   protected void indexAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id, int n) throws Exception {
@@ -600,23 +599,23 @@ public class ShardSplitTest extends Basi
   }
 
   @Override
-  protected SolrServer createNewSolrServer(String collection, String baseUrl) {
-    HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(collection, baseUrl);
-    server.setSoTimeout(5 * 60 * 1000);
-    return server;
+  protected SolrClient createNewSolrClient(String collection, String baseUrl) {
+    HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(collection, baseUrl);
+    client.setSoTimeout(5 * 60 * 1000);
+    return client;
   }
 
   @Override
-  protected SolrServer createNewSolrServer(int port) {
-    HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(port);
-    server.setSoTimeout(5 * 60 * 1000);
-    return server;
+  protected SolrClient createNewSolrClient(int port) {
+    HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(port);
+    client.setSoTimeout(5 * 60 * 1000);
+    return client;
   }
 
   @Override
-  protected CloudSolrServer createCloudClient(String defaultCollection) {
-    CloudSolrServer client = super.createCloudClient(defaultCollection);
-    client.getLbServer().getHttpClient().getParams().setParameter(CoreConnectionPNames.SO_TIMEOUT, 5 * 60 * 1000);
+  protected CloudSolrClient createCloudClient(String defaultCollection) {
+    CloudSolrClient client = super.createCloudClient(defaultCollection);
+    client.getLbClient().getHttpClient().getParams().setParameter(CoreConnectionPNames.SO_TIMEOUT, 5 * 60 * 1000);
     return client;
   }
 }
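
Beyond the renames, the ShardSplitTest hunks show a recurring per-core check: open an HttpSolrClient directly on a sub-shard's core URL, run a non-distributed query, and shut the client down in a finally block. A minimal sketch of that check, assuming the API at this revision (the core URL is a placeholder):

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class PerCoreCountSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder core URL; in the test it comes from ZkCoreNodeProps.getCoreUrl().
    String coreUrl = "http://localhost:8983/solr/collection1_shard1_0_replica1";

    SolrQuery query = new SolrQuery("*:*");
    query.set("distrib", false);        // count only this core's documents

    HttpSolrClient coreClient = new HttpSolrClient(coreUrl);
    QueryResponse response;
    try {
      response = coreClient.query(query);
    } finally {
      coreClient.shutdown();            // shutdown() is the close call at this revision
    }
    System.out.println("numFound: " + response.getResults().getNumFound());
  }
}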

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SharedFSAutoReplicaFailoverTest.java Wed Dec 31 14:05:48 2014
@@ -142,7 +142,7 @@ public class SharedFSAutoReplicaFailover
     createCollectionRequest.setConfigName("conf1");
     createCollectionRequest.setRouterField("myOwnField");
     createCollectionRequest.setAutoAddReplicas(false);
-    CollectionAdminResponse response2 = createCollectionRequest.process(getCommonCloudSolrServer());
+    CollectionAdminResponse response2 = createCollectionRequest.process(getCommonCloudSolrClient());
 
     assertEquals(0, response2.getStatus());
     assertTrue(response2.isSuccess());

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java Wed Dec 31 14:05:48 2014
@@ -17,13 +17,10 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 
 public class SimpleCollectionCreateDeleteTest extends AbstractFullDistribZkTestBase {

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java Wed Dec 31 14:05:48 2014
@@ -17,18 +17,11 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.common.SolrInputDocument;
@@ -44,6 +37,13 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
 /**
  * Test sync phase that occurs when Leader goes down and a new Leader is
  * elected.
@@ -128,16 +128,16 @@ public class SyncSliceTest extends Abstr
     SolrRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
     
-    String baseUrl = ((HttpSolrServer) shardToJetty.get("shard1").get(2).client.solrClient)
+    String baseUrl = ((HttpSolrClient) shardToJetty.get("shard1").get(2).client.solrClient)
         .getBaseURL();
     baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
     
-    HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
+    HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
     // we only set the connect timeout, not so timeout
-    baseServer.setConnectionTimeout(30000);
-    baseServer.request(request);
-    baseServer.shutdown();
-    baseServer = null;
+    baseClient.setConnectionTimeout(30000);
+    baseClient.request(request);
+    baseClient.shutdown();
+    baseClient = null;
     
     waitForThingsToLevelOut(15);
     

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java Wed Dec 31 14:05:48 2014
@@ -21,7 +21,7 @@ package org.apache.solr.cloud;
 import com.google.common.collect.Lists;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
@@ -64,7 +64,7 @@ public class TestCollectionAPI extends R
 
   @Override
   public void doTest() throws Exception {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       createCollection(null, COLLECTION_NAME, 2, 2, 2, client, null, "conf1");
       createCollection(null, COLLECTION_NAME1, 1, 1, 1, client, null, "conf1");
@@ -89,7 +89,7 @@ public class TestCollectionAPI extends R
   }
 
   private void clusterStatusWithCollectionAndShard() throws IOException, SolrServerException {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
@@ -119,7 +119,7 @@ public class TestCollectionAPI extends R
 
 
   private void listCollection() throws IOException, SolrServerException {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.LIST.toString());
@@ -141,7 +141,7 @@ public class TestCollectionAPI extends R
   }
 
   private void clusterStatusNoCollection() throws Exception {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
@@ -167,7 +167,7 @@ public class TestCollectionAPI extends R
   }
 
   private void clusterStatusWithCollection() throws IOException, SolrServerException {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
@@ -189,7 +189,7 @@ public class TestCollectionAPI extends R
   }
 
   private void clusterStatusWithRouteKey() throws IOException, SolrServerException {
-    CloudSolrServer client = createCloudClient(DEFAULT_COLLECTION);
+    CloudSolrClient client = createCloudClient(DEFAULT_COLLECTION);
     try {
       SolrInputDocument doc = new SolrInputDocument();
       doc.addField("id", "a!123"); // goes to shard2. see ShardRoutingTest for details
@@ -222,7 +222,7 @@ public class TestCollectionAPI extends R
   }
 
   private void clusterStatusAliasTest() throws Exception  {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.CREATEALIAS.toString());
@@ -259,7 +259,7 @@ public class TestCollectionAPI extends R
   }
 
   private void clusterStatusRolesTest() throws Exception  {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       client.connect();
       Replica replica = client.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
@@ -293,7 +293,7 @@ public class TestCollectionAPI extends R
   }
 
   private void replicaPropTest() throws Exception {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       client.connect();
       Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
@@ -577,7 +577,7 @@ public class TestCollectionAPI extends R
 
 
   // Expects the map will have keys, but blank values.
-  private Map<String, String> getProps(CloudSolrServer client, String collectionName, String replicaName, String... props)
+  private Map<String, String> getProps(CloudSolrClient client, String collectionName, String replicaName, String... props)
       throws KeeperException, InterruptedException {
 
     client.getZkStateReader().updateClusterState(true);
@@ -592,7 +592,7 @@ public class TestCollectionAPI extends R
     }
     return propMap;
   }
-  private void missingParamsError(CloudSolrServer client, ModifiableSolrParams origParams)
+  private void missingParamsError(CloudSolrClient client, ModifiableSolrParams origParams)
       throws IOException, SolrServerException {
 
     SolrRequest request;

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java Wed Dec 31 14:05:48 2014
@@ -17,13 +17,11 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import org.apache.solr.client.solrj.SolrServer;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.util.StrUtils;
 import org.junit.BeforeClass;
 
@@ -116,9 +114,9 @@ public class TestDistribDocBasedVersion
     log.info("### STARTING doTestHardFail");
 
     // use a leader so we test both forwarding and non-forwarding logic
-    ss = shardToLeaderJetty.get(bucket1).client.solrClient;
+    solrClient = shardToLeaderJetty.get(bucket1).client.solrClient;
 
-    // ss = cloudClient;   CloudSolrServer doesn't currently support propagating error codes
+    // solrClient = cloudClient;   CloudSolrServer doesn't currently support propagating error codes
 
     doTestHardFail("p!doc1");
     doTestHardFail("q!doc1");
@@ -139,7 +137,7 @@ public class TestDistribDocBasedVersion
     log.info("### STARTING doTestDocVersions");
     assertEquals(2, cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION).getSlices().size());
 
-    ss = cloudClient;
+    solrClient = cloudClient;
 
     vadd("b!doc1", 10);
     vadd("c!doc2", 11);
@@ -183,7 +181,7 @@ public class TestDistribDocBasedVersion
     // now test with a non-smart client
     //
     // use a leader so we test both forwarding and non-forwarding logic
-    ss = shardToLeaderJetty.get(bucket1).client.solrClient;
+    solrClient = shardToLeaderJetty.get(bucket1).client.solrClient;
 
     vadd("b!doc5", 10);
     vadd("c!doc6", 11);
@@ -237,7 +235,7 @@ public class TestDistribDocBasedVersion
 
   }
 
-  SolrServer ss;
+  SolrClient solrClient;
 
   void vdelete(String id, long version, String... params) throws Exception {
     UpdateRequest req = new UpdateRequest();
@@ -246,7 +244,7 @@ public class TestDistribDocBasedVersion
     for (int i=0; i<params.length; i+=2) {
       req.setParam( params[i], params[i+1]);
     }
-    ss.request(req);
+    solrClient.request(req);
     // req.process(cloudClient);
   }
 
@@ -256,7 +254,7 @@ public class TestDistribDocBasedVersion
     for (int i=0; i<params.length; i+=2) {
       req.setParam( params[i], params[i+1]);
     }
-    ss.request(req);
+    solrClient.request(req);
   }
 
   void vaddFail(String id, long version, int errCode, String... params) throws Exception {
@@ -315,7 +313,7 @@ public class TestDistribDocBasedVersion
       expectedIds.put(strs.get(i), Long.valueOf(verS.get(i)));
     }
 
-    ss.query(params("qt","/get", "ids",ids));
+    solrClient.query(params("qt", "/get", "ids", ids));
 
     QueryResponse rsp = cloudClient.query(params("qt","/get", "ids",ids));
     Map<String, Object> obtainedIds = new HashMap<>();
@@ -327,7 +325,7 @@ public class TestDistribDocBasedVersion
   }
 
   void doRTG(String ids) throws Exception {
-    ss.query(params("qt","/get", "ids",ids));
+    solrClient.query(params("qt", "/get", "ids", ids));
 
     Set<String> expectedIds = new HashSet<>( StrUtils.splitSmart(ids, ",", true) );
 

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java Wed Dec 31 14:05:48 2014
@@ -17,18 +17,13 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import java.io.File;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
+import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrInputDocument;
@@ -52,7 +47,11 @@ import org.junit.rules.TestRule;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
+import java.io.File;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 /**
  * Test of the MiniSolrCloudCluster functionality. Keep in mind, 
@@ -118,11 +117,11 @@ public class TestMiniSolrCloudCluster ex
     assertTrue(startedServer.isRunning());
     assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
 
-    CloudSolrServer cloudSolrServer = null;
+    CloudSolrClient cloudSolrClient = null;
     SolrZkClient zkClient = null;
     try {
-      cloudSolrServer = new CloudSolrServer(miniCluster.getZkServer().getZkAddress(), true);
-      cloudSolrServer.connect();
+      cloudSolrClient = new CloudSolrClient(miniCluster.getZkServer().getZkAddress(), true);
+      cloudSolrClient.connect();
       zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(),
         AbstractZkTestCase.TIMEOUT, 45000, null);
 
@@ -131,20 +130,20 @@ public class TestMiniSolrCloudCluster ex
       String configName = "solrCloudCollectionConfig";
       System.setProperty("solr.tests.mergePolicy", "org.apache.lucene.index.TieredMergePolicy");
       uploadConfigToZk(SolrTestCaseJ4.TEST_HOME() + File.separator + "collection1" + File.separator + "conf", configName);
-      createCollection(cloudSolrServer, collectionName, NUM_SHARDS, REPLICATION_FACTOR, configName);
+      createCollection(cloudSolrClient, collectionName, NUM_SHARDS, REPLICATION_FACTOR, configName);
 
       // modify/query collection
-      cloudSolrServer.setDefaultCollection(collectionName);
+      cloudSolrClient.setDefaultCollection(collectionName);
       SolrInputDocument doc = new SolrInputDocument();
       doc.setField("id", "1");
 
       ZkStateReader zkStateReader = new ZkStateReader(zkClient);
       waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
-      cloudSolrServer.add(doc);
-      cloudSolrServer.commit();
+      cloudSolrClient.add(doc);
+      cloudSolrClient.commit();
       SolrQuery query = new SolrQuery();
       query.setQuery("*:*");
-      QueryResponse rsp = cloudSolrServer.query(query);
+      QueryResponse rsp = cloudSolrClient.query(query);
       assertEquals(1, rsp.getResults().getNumFound());
 
       // remove a server not hosting any replicas
@@ -173,8 +172,8 @@ public class TestMiniSolrCloudCluster ex
         }
       }
     } finally {
-      if (cloudSolrServer != null) {
-        cloudSolrServer.shutdown();
+      if (cloudSolrClient != null) {
+        cloudSolrClient.shutdown();
       }
       if (zkClient != null) {
         zkClient.close();
@@ -217,7 +216,7 @@ public class TestMiniSolrCloudCluster ex
     zkClient.makePath(ZkController.CONFIGS_ZKNODE + "/" + configName + "/" + nameInZk, file, false, true);
   }
 
-  protected NamedList<Object> createCollection(CloudSolrServer server, String name, int numShards,
+  protected NamedList<Object> createCollection(CloudSolrClient client, String name, int numShards,
       int replicationFactor, String configName) throws Exception {
     ModifiableSolrParams modParams = new ModifiableSolrParams();
     modParams.set(CoreAdminParams.ACTION, CollectionAction.CREATE.name());
@@ -227,7 +226,7 @@ public class TestMiniSolrCloudCluster ex
     modParams.set("collection.configName", configName);
     QueryRequest request = new QueryRequest(modParams);
     request.setPath("/admin/collections");
-    return server.request(request);
+    return client.request(request);
   }
 
   protected void waitForRecoveriesToFinish(String collection,
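
TestMiniSolrCloudCluster switches the ZooKeeper-aware client from CloudSolrServer to CloudSolrClient but keeps the same lifecycle: connect, pick a default collection, index, commit, query, shut down. A minimal sketch of that lifecycle using only calls visible in the diff (the ZooKeeper address and collection name are placeholders):

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrInputDocument;

public class CloudClientSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder ZooKeeper address; the test reads it from the MiniSolrCloudCluster.
    CloudSolrClient cloudSolrClient = new CloudSolrClient("localhost:9983", true);
    try {
      cloudSolrClient.connect();
      cloudSolrClient.setDefaultCollection("example_collection");  // placeholder collection

      SolrInputDocument doc = new SolrInputDocument();
      doc.setField("id", "1");
      cloudSolrClient.add(doc);
      cloudSolrClient.commit();

      QueryResponse rsp = cloudSolrClient.query(new SolrQuery("*:*"));
      System.out.println("numFound: " + rsp.getResults().getNumFound());
    } finally {
      cloudSolrClient.shutdown();
    }
  }
}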

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestModifyConfFiles.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestModifyConfFiles.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestModifyConfFiles.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestModifyConfFiles.java Wed Dec 31 14:05:48 2014
@@ -18,7 +18,7 @@ package org.apache.solr.cloud;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.params.ModifiableSolrParams;
@@ -39,7 +39,7 @@ public class TestModifyConfFiles extends
   @Override
   public void doTest() throws Exception {
     int which = r.nextInt(clients.size());
-    HttpSolrServer client = (HttpSolrServer) clients.get(which);
+    HttpSolrClient client = (HttpSolrClient) clients.get(which);
 
     ModifiableSolrParams params = new ModifiableSolrParams();
     params.set("op", "write");

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java Wed Dec 31 14:05:48 2014
@@ -16,19 +16,10 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkStateReader;
@@ -38,6 +29,12 @@ import org.apache.solr.common.util.Named
 import org.apache.zookeeper.KeeperException;
 import org.junit.Before;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 
 public class TestRebalanceLeaders extends AbstractFullDistribZkTestBase {
 
@@ -65,7 +62,7 @@ public class TestRebalanceLeaders extend
 
   @Override
   public void doTest() throws Exception {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     reps = random().nextInt(9) + 1; // make sure and do at least one.
     try {
       // Mix up a bunch of different combinations of shards and replicas in order to exercise boundary cases.
@@ -247,11 +244,11 @@ public class TestRebalanceLeaders extend
     return true;
   }
 
-  byte[] getZkData(CloudSolrServer server, String path) {
+  byte[] getZkData(CloudSolrClient client, String path) {
     org.apache.zookeeper.data.Stat stat = new org.apache.zookeeper.data.Stat();
     long start = System.currentTimeMillis();
     try {
-      byte[] data = server.getZkStateReader().getZkClient().getData(path, null, stat, true);
+      byte[] data = client.getZkStateReader().getZkClient().getData(path, null, stat, true);
       if (data != null) {
         return data;
       }

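The getZkData change above is representative of how these tests reach ZooKeeper through the client rather than through a separate connection: CloudSolrClient exposes its ZkStateReader, which in turn wraps a SolrZkClient. A hedged sketch of that same read path, with a hypothetical zkHost and an illustrative znode path:

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.zookeeper.data.Stat;

public class ZkReadSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical zkHost; the client must connect before its ZkStateReader is usable.
    CloudSolrClient client = new CloudSolrClient("localhost:9983");
    try {
      client.connect();
      Stat stat = new Stat();
      // Read the raw bytes of a znode; the final argument asks for retry on connection loss.
      byte[] data = client.getZkStateReader().getZkClient()
          .getData("/clusterstate.json", null, stat, true);
      System.out.println(data == null ? "no data" : new String(data, "UTF-8"));
    } finally {
      client.shutdown();
    }
  }
}
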
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java Wed Dec 31 14:05:48 2014
@@ -26,7 +26,7 @@ import java.util.Map;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
@@ -58,7 +58,7 @@ public class TestReplicaProperties exten
 
   @Override
   public void doTest() throws Exception {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       // Mix up a bunch of different combinations of shards and replicas in order to exercise boundary cases.
       // shards, replicationfactor, maxreplicaspernode
@@ -81,7 +81,7 @@ public class TestReplicaProperties exten
   }
 
   private void listCollection() throws IOException, SolrServerException {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.LIST.toString());
@@ -101,7 +101,7 @@ public class TestReplicaProperties exten
 
 
   private void clusterAssignPropertyTest() throws Exception {
-    CloudSolrServer client = createCloudClient(null);
+    CloudSolrClient client = createCloudClient(null);
     try {
       client.connect();
       try {
@@ -204,7 +204,7 @@ public class TestReplicaProperties exten
     }
   }
 
-  private void verifyLeaderAssignment(CloudSolrServer client, String collectionName)
+  private void verifyLeaderAssignment(CloudSolrClient client, String collectionName)
       throws InterruptedException, KeeperException {
     String lastFailMsg = "";
     for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
@@ -239,7 +239,7 @@ public class TestReplicaProperties exten
     fail(lastFailMsg);
   }
 
-  private void addProperty(CloudSolrServer client, String... paramsIn) throws IOException, SolrServerException {
+  private void addProperty(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
     assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
     ModifiableSolrParams params = new ModifiableSolrParams();
     for (int idx = 0; idx < paramsIn.length; idx += 2) {

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java Wed Dec 31 14:05:48 2014
@@ -19,7 +19,7 @@ package org.apache.solr.cloud;
 
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
@@ -214,12 +214,12 @@ public class TestRequestStatusCollection
     SolrRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
 
-    String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
+    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
         .getBaseURL();
     baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
 
-    HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
-    baseServer.setConnectionTimeout(15000);
-    return baseServer.request(request);
+    HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
+    baseClient.setConnectionTimeout(15000);
+    return baseClient.request(request);
   }
 }

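The last hunk above shows a recurring pattern in these tests: build a short-lived HttpSolrClient against a node's base URL (with the core name trimmed off) purely to issue Collections API admin calls. A sketch of polling an async request's status that way; the base URL and request id are hypothetical, and the "requestid" parameter name is an assumption based on the Collections API of this era:

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.CollectionParams.CollectionAction;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

public class RequestStatusSketch {
  public static void main(String[] args) throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(CoreAdminParams.ACTION, CollectionAction.REQUESTSTATUS.name());
    params.set("requestid", "1000");  // hypothetical async id passed to an earlier call

    SolrRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");

    // Hypothetical node base URL; note it points at the node root, not a specific core.
    HttpSolrClient baseClient = new HttpSolrClient("http://localhost:8983/solr");
    baseClient.setConnectionTimeout(15000);
    try {
      NamedList<Object> status = baseClient.request(request);
      System.out.println(status);
    } finally {
      baseClient.shutdown();
    }
  }
}
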
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestShortCircuitedRequests.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestShortCircuitedRequests.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestShortCircuitedRequests.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/TestShortCircuitedRequests.java Wed Dec 31 14:05:48 2014
@@ -18,7 +18,7 @@ package org.apache.solr.cloud;
  */
 
 import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.params.ShardParams;
@@ -48,7 +48,7 @@ public class TestShortCircuitedRequests
     // query shard3 directly with _route_=a! so that we trigger the short circuited request path
     Replica shard3 = cloudClient.getZkStateReader().getClusterState().getLeader(DEFAULT_COLLECTION, "shard3");
     String nodeName = shard3.getNodeName();
-    SolrServer shard3Client = getClient(nodeName);
+    SolrClient shard3Client = getClient(nodeName);
     QueryResponse response = shard3Client.query(new SolrQuery("*:*").add(ShardParams._ROUTE_, "a!").add(ShardParams.SHARDS_INFO, "true"));
 
     assertEquals("Could not find doc", 1, response.getResults().getNumFound());

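The short-circuit test relies on composite-id routing: adding a _route_ value to a query lets the receiving node serve it locally when it hosts the matching shard, and shards.info reports which shards actually answered. A minimal sketch of that query, assuming a hypothetical core URL on the node hosting the "a!" route key:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.ShardParams;

public class RoutedQuerySketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical core URL; replace with the node/core that owns the routed shard.
    HttpSolrClient client = new HttpSolrClient("http://localhost:8983/solr/collection1");
    try {
      SolrQuery query = new SolrQuery("*:*");
      query.add(ShardParams._ROUTE_, "a!");         // restrict to the shard owning route key "a!"
      query.add(ShardParams.SHARDS_INFO, "true");   // ask Solr to report per-shard details
      QueryResponse response = client.query(query);
      System.out.println("hits: " + response.getResults().getNumFound());
      System.out.println(response.getResponse().get(ShardParams.SHARDS_INFO));
    } finally {
      client.shutdown();
    }
  }
}
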
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java Wed Dec 31 14:05:48 2014
@@ -17,19 +17,12 @@ package org.apache.solr.cloud;
  * limitations under the License.
  */
 
-import java.io.File;
-import java.io.IOException;
-import java.util.Random;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
 import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServer;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;
 import org.apache.solr.common.SolrInputDocument;
@@ -41,6 +34,13 @@ import org.apache.solr.util.DefaultSolrT
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.io.File;
+import java.io.IOException;
+import java.util.Random;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
 /**
  * This test simply does a bunch of basic things in solrcloud mode and asserts things
  * work as expected.
@@ -92,12 +92,12 @@ public class UnloadDistributedZkTest ext
     createCmd.setDataDir(getDataDir(coreDataDir));
     createCmd.setNumShards(2);
     
-    SolrServer client = clients.get(0);
+    SolrClient client = clients.get(0);
     String url1 = getBaseUrl(client);
-    HttpSolrServer server = new HttpSolrServer(url1);
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(60000);
-    server.request(createCmd);
+    HttpSolrClient adminClient = new HttpSolrClient(url1);
+    adminClient.setConnectionTimeout(15000);
+    adminClient.setSoTimeout(60000);
+    adminClient.request(createCmd);
     
     createCmd = new Create();
     createCmd.setCoreName("test_unload_shard_and_collection_2");
@@ -106,7 +106,7 @@ public class UnloadDistributedZkTest ext
     coreDataDir = createTempDir().toFile().getAbsolutePath();
     createCmd.setDataDir(getDataDir(coreDataDir));
     
-    server.request(createCmd);
+    adminClient.request(createCmd);
     
     // does not mean they are active and up yet :*
     waitForRecoveriesToFinish(collection, false);
@@ -114,10 +114,10 @@ public class UnloadDistributedZkTest ext
     // now unload one of the two
     Unload unloadCmd = new Unload(false);
     unloadCmd.setCoreName("test_unload_shard_and_collection_2");
-    server.request(unloadCmd);
+    adminClient.request(unloadCmd);
     
     // there should be only one shard
-    int slices = getCommonCloudSolrServer().getZkStateReader().getClusterState().getSlices(collection).size();
+    int slices = getCommonCloudSolrClient().getZkStateReader().getClusterState().getSlices(collection).size();
     long timeoutAt = System.currentTimeMillis() + 45000;
     while (slices != 1) {
       if (System.currentTimeMillis() > timeoutAt) {
@@ -126,20 +126,20 @@ public class UnloadDistributedZkTest ext
       }
       
       Thread.sleep(1000);
-      slices = getCommonCloudSolrServer().getZkStateReader().getClusterState().getSlices(collection).size();
+      slices = getCommonCloudSolrClient().getZkStateReader().getClusterState().getSlices(collection).size();
     }
     
     // now unload one of the other
     unloadCmd = new Unload(false);
     unloadCmd.setCoreName("test_unload_shard_and_collection_1");
-    server.request(unloadCmd);
-    server.shutdown();
-    server = null;
+    adminClient.request(unloadCmd);
+    adminClient.shutdown();
+    adminClient = null;
     
     //printLayout();
     // the collection should be gone
     timeoutAt = System.currentTimeMillis() + 30000;
-    while (getCommonCloudSolrServer().getZkStateReader().getClusterState().hasCollection(collection)) {
+    while (getCommonCloudSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
       if (System.currentTimeMillis() > timeoutAt) {
         printLayout();
         fail("Still found collection");
@@ -157,11 +157,11 @@ public class UnloadDistributedZkTest ext
     File tmpDir = createTempDir().toFile();
     
     // create a new collection collection
-    SolrServer client = clients.get(0);
+    SolrClient client = clients.get(0);
     String url1 = getBaseUrl(client);
-    HttpSolrServer server = new HttpSolrServer(url1);
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(60000);
+    HttpSolrClient adminClient = new HttpSolrClient(url1);
+    adminClient.setConnectionTimeout(15000);
+    adminClient.setSoTimeout(60000);
     
     Create createCmd = new Create();
     createCmd.setCoreName("unloadcollection1");
@@ -169,11 +169,11 @@ public class UnloadDistributedZkTest ext
     createCmd.setNumShards(1);
     String core1DataDir = tmpDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection1" + "_1n";
     createCmd.setDataDir(getDataDir(core1DataDir));
-    server.request(createCmd);
-    server.shutdown();
-    server = null;
+    adminClient.request(createCmd);
+    adminClient.shutdown();
+    adminClient = null;
     
-    ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
+    ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
     
     zkStateReader.updateClusterState(true);
 
@@ -182,16 +182,16 @@ public class UnloadDistributedZkTest ext
     
     client = clients.get(1);
     String url2 = getBaseUrl(client);
-    server = new HttpSolrServer(url2);
+    adminClient = new HttpSolrClient(url2);
     
     createCmd = new Create();
     createCmd.setCoreName("unloadcollection2");
     createCmd.setCollection("unloadcollection");
     String core2dataDir = tmpDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection1" + "_2n";
     createCmd.setDataDir(getDataDir(core2dataDir));
-    server.request(createCmd);
-    server.shutdown();
-    server = null;
+    adminClient.request(createCmd);
+    adminClient.shutdown();
+    adminClient = null;
     
     zkStateReader.updateClusterState(true);
     slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size();
@@ -202,9 +202,9 @@ public class UnloadDistributedZkTest ext
     ZkCoreNodeProps leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");
     
     Random random = random();
-    HttpSolrServer collectionClient;
+    HttpSolrClient collectionClient;
     if (random.nextBoolean()) {
-      collectionClient = new HttpSolrServer(leaderProps.getCoreUrl());
+      collectionClient = new HttpSolrClient(leaderProps.getCoreUrl());
       // lets try and use the solrj client to index and retrieve a couple
       // documents
       SolrInputDocument doc1 = getDoc(id, 6, i1, -600, tlong, 600, t1,
@@ -224,16 +224,16 @@ public class UnloadDistributedZkTest ext
     // create another replica for our collection
     client = clients.get(2);
     String url3 = getBaseUrl(client);
-    server = new HttpSolrServer(url3);
+    adminClient = new HttpSolrClient(url3);
     
     createCmd = new Create();
     createCmd.setCoreName("unloadcollection3");
     createCmd.setCollection("unloadcollection");
     String core3dataDir = tmpDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection" + "_3n";
     createCmd.setDataDir(getDataDir(core3dataDir));
-    server.request(createCmd);
-    server.shutdown();
-    server = null;
+    adminClient.request(createCmd);
+    adminClient.shutdown();
+    adminClient = null;
     
     
     waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
@@ -241,7 +241,7 @@ public class UnloadDistributedZkTest ext
     // so that we start with some versions when we reload...
     DirectUpdateHandler2.commitOnClose = false;
     
-    HttpSolrServer addClient = new HttpSolrServer(url3 + "/unloadcollection3");
+    HttpSolrClient addClient = new HttpSolrClient(url3 + "/unloadcollection3");
     addClient.setConnectionTimeout(30000);
 
     // add a few docs
@@ -257,7 +257,7 @@ public class UnloadDistributedZkTest ext
     //collectionClient.commit();
     
     // unload the leader
-    collectionClient = new HttpSolrServer(leaderProps.getBaseUrl());
+    collectionClient = new HttpSolrClient(leaderProps.getBaseUrl());
     collectionClient.setConnectionTimeout(15000);
     collectionClient.setSoTimeout(30000);
     
@@ -283,7 +283,7 @@ public class UnloadDistributedZkTest ext
     // ensure there is a leader
     zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);
     
-    addClient = new HttpSolrServer(url2 + "/unloadcollection2");
+    addClient = new HttpSolrClient(url2 + "/unloadcollection2");
     addClient.setConnectionTimeout(30000);
     addClient.setSoTimeout(90000);
     
@@ -300,24 +300,24 @@ public class UnloadDistributedZkTest ext
     // create another replica for our collection
     client = clients.get(3);
     String url4 = getBaseUrl(client);
-    server = new HttpSolrServer(url4);
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(30000);
+    adminClient = new HttpSolrClient(url4);
+    adminClient.setConnectionTimeout(15000);
+    adminClient.setSoTimeout(30000);
     
     createCmd = new Create();
     createCmd.setCoreName("unloadcollection4");
     createCmd.setCollection("unloadcollection");
     String core4dataDir = tmpDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection" + "_4n";
     createCmd.setDataDir(getDataDir(core4dataDir));
-    server.request(createCmd);
-    server.shutdown();
-    server = null;
+    adminClient.request(createCmd);
+    adminClient.shutdown();
+    adminClient = null;
     
     waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
     
     // unload the leader again
     leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");
-    collectionClient = new HttpSolrServer(leaderProps.getBaseUrl());
+    collectionClient = new HttpSolrClient(leaderProps.getBaseUrl());
     collectionClient.setConnectionTimeout(15000);
     collectionClient.setSoTimeout(30000);
     
@@ -343,64 +343,64 @@ public class UnloadDistributedZkTest ext
     DirectUpdateHandler2.commitOnClose = true;
     
     // bring the downed leader back as replica
-    server = new HttpSolrServer(leaderProps.getBaseUrl());
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(30000);
+    adminClient = new HttpSolrClient(leaderProps.getBaseUrl());
+    adminClient.setConnectionTimeout(15000);
+    adminClient.setSoTimeout(30000);
     
     createCmd = new Create();
     createCmd.setCoreName(leaderProps.getCoreName());
     createCmd.setCollection("unloadcollection");
     createCmd.setDataDir(getDataDir(core1DataDir));
-    server.request(createCmd);
-    server.shutdown();
-    server = null;
+    adminClient.request(createCmd);
+    adminClient.shutdown();
+    adminClient = null;
 
     waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
     
-    server = new HttpSolrServer(url2 + "/unloadcollection");
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(30000);
-    server.commit();
+    adminClient = new HttpSolrClient(url2 + "/unloadcollection");
+    adminClient.setConnectionTimeout(15000);
+    adminClient.setSoTimeout(30000);
+    adminClient.commit();
     SolrQuery q = new SolrQuery("*:*");
     q.set("distrib", false);
-    long found1 = server.query(q).getResults().getNumFound();
-    server.shutdown();
-    server = new HttpSolrServer(url3 + "/unloadcollection");
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(30000);
-    server.commit();
+    long found1 = adminClient.query(q).getResults().getNumFound();
+    adminClient.shutdown();
+    adminClient = new HttpSolrClient(url3 + "/unloadcollection");
+    adminClient.setConnectionTimeout(15000);
+    adminClient.setSoTimeout(30000);
+    adminClient.commit();
     q = new SolrQuery("*:*");
     q.set("distrib", false);
-    long found3 = server.query(q).getResults().getNumFound();
-    server.shutdown();
-    server = new HttpSolrServer(url4 + "/unloadcollection");
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(30000);
-    server.commit();
+    long found3 = adminClient.query(q).getResults().getNumFound();
+    adminClient.shutdown();
+    adminClient = new HttpSolrClient(url4 + "/unloadcollection");
+    adminClient.setConnectionTimeout(15000);
+    adminClient.setSoTimeout(30000);
+    adminClient.commit();
     q = new SolrQuery("*:*");
     q.set("distrib", false);
-    long found4 = server.query(q).getResults().getNumFound();
+    long found4 = adminClient.query(q).getResults().getNumFound();
     
     // all 3 shards should now have the same number of docs
     assertEquals(found1, found3);
     assertEquals(found3, found4);
-    server.shutdown();
+    adminClient.shutdown();
     
   }
   
   private void testUnloadLotsOfCores() throws Exception {
-    SolrServer client = clients.get(2);
+    SolrClient client = clients.get(2);
     String url3 = getBaseUrl(client);
-    final HttpSolrServer server = new HttpSolrServer(url3);
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(60000);
+    final HttpSolrClient adminClient = new HttpSolrClient(url3);
+    adminClient.setConnectionTimeout(15000);
+    adminClient.setSoTimeout(60000);
     ThreadPoolExecutor executor = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
         5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
         new DefaultSolrThreadFactory("testExecutor"));
     int cnt = atLeast(3);
     
     // create the cores
-    createCores(server, executor, "multiunload", 2, cnt);
+    createCores(adminClient, executor, "multiunload", 2, cnt);
     
     executor.shutdown();
     executor.awaitTermination(120, TimeUnit.SECONDS);
@@ -415,7 +415,7 @@ public class UnloadDistributedZkTest ext
           Unload unloadCmd = new Unload(true);
           unloadCmd.setCoreName("multiunload" + freezeJ);
           try {
-            server.request(unloadCmd);
+            adminClient.request(unloadCmd);
           } catch (SolrServerException e) {
             throw new RuntimeException(e);
           } catch (IOException e) {
@@ -427,7 +427,7 @@ public class UnloadDistributedZkTest ext
     }
     executor.shutdown();
     executor.awaitTermination(120, TimeUnit.SECONDS);
-    server.shutdown();
+    adminClient.shutdown();
   }
 
 

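Most of the churn in this file is the admin-client rename around CoreAdmin Create and Unload requests sent to a node-level HttpSolrClient. A compact sketch of that create/unload round trip, assuming a running SolrCloud node at a hypothetical base URL and a matching config already present in ZooKeeper:

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;

public class CoreAdminSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical node base URL; CoreAdmin requests target the node, not a core.
    HttpSolrClient adminClient = new HttpSolrClient("http://localhost:8983/solr");
    adminClient.setConnectionTimeout(15000);
    adminClient.setSoTimeout(60000);
    try {
      Create createCmd = new Create();
      createCmd.setCoreName("demo_core_1");
      // Assumes the named collection can resolve a configset in ZooKeeper.
      createCmd.setCollection("demo_collection");
      createCmd.setNumShards(1);
      adminClient.request(createCmd);

      // Unload without deleting the index directory (constructor argument = deleteIndex).
      Unload unloadCmd = new Unload(false);
      unloadCmd.setCoreName("demo_core_1");
      adminClient.request(unloadCmd);
    } finally {
      adminClient.shutdown();
    }
  }
}
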
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java Wed Dec 31 14:05:48 2014
@@ -17,11 +17,8 @@
 
 package org.apache.solr.cloud.hdfs;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.store.NRTCachingDirectory;
@@ -29,7 +26,7 @@ import org.apache.lucene.util.LuceneTest
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.cloud.BasicDistributedZkTest;
 import org.apache.solr.cloud.StopableIndexingThread;
 import org.apache.solr.core.CoreContainer;
@@ -44,8 +41,10 @@ import org.apache.solr.util.RefCounted;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
 
 @Slow
 @Nightly
@@ -95,13 +94,13 @@ public class HdfsWriteToMultipleCollecti
     for (int i = 0; i < cnt; i++) {
       waitForRecoveriesToFinish(ACOLLECTION + i, false);
     }
-    List<CloudSolrServer> cloudServers = new ArrayList<>();
+    List<CloudSolrClient> cloudClients = new ArrayList<>();
     List<StopableIndexingThread> threads = new ArrayList<>();
     for (int i = 0; i < cnt; i++) {
-      CloudSolrServer server = new CloudSolrServer(zkServer.getZkAddress());
-      server.setDefaultCollection(ACOLLECTION + i);
-      cloudServers.add(server);
-      StopableIndexingThread indexThread = new StopableIndexingThread(null, server, "1", true, docCount);
+      CloudSolrClient client = new CloudSolrClient(zkServer.getZkAddress());
+      client.setDefaultCollection(ACOLLECTION + i);
+      cloudClients.add(client);
+      StopableIndexingThread indexThread = new StopableIndexingThread(null, client, "1", true, docCount);
       threads.add(indexThread);
       indexThread.start();
     }
@@ -113,13 +112,13 @@ public class HdfsWriteToMultipleCollecti
     }
    
     long collectionsCount = 0;
-    for (CloudSolrServer server : cloudServers) {
-      server.commit();
-      collectionsCount += server.query(new SolrQuery("*:*")).getResults().getNumFound();
+    for (CloudSolrClient client : cloudClients) {
+      client.commit();
+      collectionsCount += client.query(new SolrQuery("*:*")).getResults().getNumFound();
     }
     
-    for (CloudSolrServer server : cloudServers) {
-      server.shutdown();
+    for (CloudSolrClient client : cloudClients) {
+      client.shutdown();
     }
 
     assertEquals(addCnt, collectionsCount);

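The HDFS test exercises the same CloudSolrClient lifecycle the other hunks do: set a default collection, index, commit, count, shut down. A minimal sketch of that loop, with a hypothetical zkHost and collection name:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.SolrInputDocument;

public class CloudIndexSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical ZooKeeper address and collection name.
    CloudSolrClient client = new CloudSolrClient("localhost:9983");
    client.setDefaultCollection("acollection0");
    try {
      for (int i = 0; i < 10; i++) {
        SolrInputDocument doc = new SolrInputDocument();
        doc.setField("id", String.valueOf(i));
        client.add(doc);
      }
      client.commit();
      long found = client.query(new SolrQuery("*:*")).getResults().getNumFound();
      System.out.println("indexed docs: " + found);
    } finally {
      client.shutdown();
    }
  }
}
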
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java Wed Dec 31 14:05:48 2014
@@ -33,9 +33,9 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.cloud.BasicDistributedZkTest;
 import org.apache.solr.cloud.ChaosMonkey;
@@ -162,8 +162,8 @@ public class StressHdfsTest extends Basi
     List<String> dataDirs = new ArrayList<>();
     
     int i = 0;
-    for (SolrServer client : clients) {
-      HttpSolrServer c = new HttpSolrServer(getBaseUrl(client) + "/" + DELETE_DATA_DIR_COLLECTION);
+    for (SolrClient client : clients) {
+      HttpSolrClient c = new HttpSolrClient(getBaseUrl(client) + "/" + DELETE_DATA_DIR_COLLECTION);
       try {
         int docCnt = random().nextInt(1000) + 1;
         for (int j = 0; j < docCnt; j++) {