You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ro...@apache.org on 2014/12/31 15:05:50 UTC
svn commit: r1648697 [3/13] - in /lucene/dev/trunk/solr: ./
contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/
contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/
contrib/map-reduce/src/java/org/apache/solr/hadoop...
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java Wed Dec 31 14:05:48 2014
@@ -17,22 +17,16 @@ package org.apache.solr.cloud;
* limitations under the License.
*/
-import java.net.ConnectException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
import org.apache.http.client.HttpClient;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
+import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServer;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
-import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.core.Diagnostics;
@@ -44,7 +38,12 @@ import org.junit.BeforeClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
+import java.net.ConnectException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
@Slow
@SuppressSSL
@@ -253,7 +252,7 @@ public class ChaosMonkeyNothingIsSafeTes
zkServer.run();
}
- CloudSolrServer client = createCloudClient("collection1");
+ CloudSolrClient client = createCloudClient("collection1");
try {
createCollection(null, "testcollection",
1, 1, 1, client, null, "conf1");
@@ -294,11 +293,11 @@ public class ChaosMonkeyNothingIsSafeTes
private HttpClient httpClient = HttpClientUtil.createClient(null);
private volatile boolean stop = false;
int clientIndex = 0;
- private ConcurrentUpdateSolrServer suss;
- private List<SolrServer> clients;
+ private ConcurrentUpdateSolrClient cusc;
+ private List<SolrClient> clients;
private AtomicInteger fails = new AtomicInteger();
- public FullThrottleStopableIndexingThread(List<SolrServer> clients,
+ public FullThrottleStopableIndexingThread(List<SolrClient> clients,
String id, boolean doDeletes) {
super(controlClient, cloudClient, id, doDeletes);
setName("FullThrottleStopableIndexingThread");
@@ -306,12 +305,12 @@ public class ChaosMonkeyNothingIsSafeTes
this.clients = clients;
HttpClientUtil.setConnectionTimeout(httpClient, 15000);
HttpClientUtil.setSoTimeout(httpClient, 15000);
- suss = new ConcurrentUpdateSolrServer(
- ((HttpSolrServer) clients.get(0)).getBaseURL(), httpClient, 8,
+ cusc = new ConcurrentUpdateSolrClient(
+ ((HttpSolrClient) clients.get(0)).getBaseURL(), httpClient, 8,
2) {
@Override
public void handleError(Throwable ex) {
- log.warn("suss error", ex);
+ log.warn("cusc error", ex);
}
};
}
@@ -330,7 +329,7 @@ public class ChaosMonkeyNothingIsSafeTes
String delete = deletes.remove(0);
try {
numDeletes++;
- suss.deleteById(delete);
+ cusc.deleteById(delete);
} catch (Exception e) {
changeUrlOnError(e);
//System.err.println("REQUEST FAILED:");
@@ -350,7 +349,7 @@ public class ChaosMonkeyNothingIsSafeTes
50,
t1,
"Saxon heptarchies that used to rip around so in old times and raise Cain. My, you ought to seen old Henry the Eight when he was in bloom. He WAS a blossom. He used to marry a new wife every day, and chop off her head next morning. And he would do it just as indifferent as if ");
- suss.add(doc);
+ cusc.add(doc);
} catch (Exception e) {
changeUrlOnError(e);
//System.err.println("REQUEST FAILED:");
@@ -373,13 +372,13 @@ public class ChaosMonkeyNothingIsSafeTes
if (clientIndex > clients.size() - 1) {
clientIndex = 0;
}
- suss.shutdownNow();
- suss = new ConcurrentUpdateSolrServer(
- ((HttpSolrServer) clients.get(clientIndex)).getBaseURL(),
+ cusc.shutdownNow();
+ cusc = new ConcurrentUpdateSolrClient(
+ ((HttpSolrClient) clients.get(clientIndex)).getBaseURL(),
httpClient, 30, 3) {
@Override
public void handleError(Throwable ex) {
- log.warn("suss error", ex);
+ log.warn("cusc error", ex);
}
};
}
@@ -388,8 +387,8 @@ public class ChaosMonkeyNothingIsSafeTes
@Override
public void safeStop() {
stop = true;
- suss.blockUntilFinished();
- suss.shutdownNow();
+ cusc.blockUntilFinished();
+ cusc.shutdownNow();
httpClient.getConnectionManager().shutdown();
}
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderTest.java Wed Dec 31 14:05:48 2014
@@ -24,7 +24,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.core.Diagnostics;
import org.apache.solr.update.SolrCmdDistributor;
@@ -172,7 +172,7 @@ public class ChaosMonkeySafeLeaderTest e
zkServer.run();
}
- CloudSolrServer client = createCloudClient("collection1");
+ CloudSolrClient client = createCloudClient("collection1");
try {
createCollection(null, "testcollection",
1, 1, 1, client, null, "conf1");
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java Wed Dec 31 14:05:48 2014
@@ -18,7 +18,7 @@ package org.apache.solr.cloud;
*/
import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
@@ -77,7 +77,7 @@ public class ChaosMonkeyShardSplitTest e
Thread indexThread = null;
OverseerRestarter killer = null;
Thread killerThread = null;
- final SolrServer solrServer = clients.get(0);
+ final SolrClient solrClient = clients.get(0);
try {
del("*:*");
@@ -146,8 +146,8 @@ public class ChaosMonkeyShardSplitTest e
} finally {
if (indexThread != null)
indexThread.join();
- if (solrServer != null)
- solrServer.commit();
+ if (solrClient != null)
+ solrClient.commit();
if (killer != null) {
killer.run = false;
if (killerThread != null) {
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java Wed Dec 31 14:05:48 2014
@@ -18,9 +18,9 @@ package org.apache.solr.cloud;
*/
import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.RequestStatus;
@@ -69,16 +69,16 @@ public class CollectionsAPIAsyncDistribu
}
private void testSolrJAPICalls() throws Exception {
- SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
+ SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
Create createCollectionRequest = new Create();
createCollectionRequest.setCollectionName("testasynccollectioncreation");
createCollectionRequest.setNumShards(1);
createCollectionRequest.setConfigName("conf1");
createCollectionRequest.setAsyncId("1001");
- createCollectionRequest.process(server);
+ createCollectionRequest.process(client);
- String state = getRequestStateAfterCompletion("1001", MAX_TIMEOUT_SECONDS, server);
+ String state = getRequestStateAfterCompletion("1001", MAX_TIMEOUT_SECONDS, client);
assertEquals("CreateCollection task did not complete!", "completed", state);
@@ -88,9 +88,9 @@ public class CollectionsAPIAsyncDistribu
createCollectionRequest.setNumShards(1);
createCollectionRequest.setConfigName("conf1");
createCollectionRequest.setAsyncId("1002");
- createCollectionRequest.process(server);
+ createCollectionRequest.process(client);
- state = getRequestStateAfterCompletion("1002", MAX_TIMEOUT_SECONDS, server);
+ state = getRequestStateAfterCompletion("1002", MAX_TIMEOUT_SECONDS, client);
assertEquals("Recreating a collection with the same name didn't fail, should have.", "failed", state);
@@ -98,8 +98,8 @@ public class CollectionsAPIAsyncDistribu
addReplica.setCollectionName("testasynccollectioncreation");
addReplica.setShardName("shard1");
addReplica.setAsyncId("1003");
- server.request(addReplica);
- state = getRequestStateAfterCompletion("1003", MAX_TIMEOUT_SECONDS, server);
+ client.request(addReplica);
+ state = getRequestStateAfterCompletion("1003", MAX_TIMEOUT_SECONDS, client);
assertEquals("Add replica did not complete", "completed", state);
@@ -107,18 +107,18 @@ public class CollectionsAPIAsyncDistribu
splitShardRequest.setCollectionName("testasynccollectioncreation");
splitShardRequest.setShardName("shard1");
splitShardRequest.setAsyncId("1004");
- splitShardRequest.process(server);
+ splitShardRequest.process(client);
- state = getRequestStateAfterCompletion("1004", MAX_TIMEOUT_SECONDS * 2, server);
+ state = getRequestStateAfterCompletion("1004", MAX_TIMEOUT_SECONDS * 2, client);
assertEquals("Shard split did not complete. Last recorded state: " + state, "completed", state);
}
- private String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrServer server)
+ private String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrClient client)
throws IOException, SolrServerException {
String state = null;
while(waitForSeconds-- > 0) {
- state = getRequestState(requestId, server);
+ state = getRequestState(requestId, client);
if(state.equals("completed") || state.equals("failed"))
return state;
try {
@@ -129,10 +129,10 @@ public class CollectionsAPIAsyncDistribu
return state;
}
- private String getRequestState(String requestId, SolrServer server) throws IOException, SolrServerException {
+ private String getRequestState(String requestId, SolrClient client) throws IOException, SolrServerException {
RequestStatus request = new RequestStatus();
request.setRequestId(requestId);
- CollectionAdminResponse response = request.process(server);
+ CollectionAdminResponse response = request.process(client);
NamedList innerResponse = (NamedList) response.getResponse().get("status");
return (String) innerResponse.get("state");
}
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java Wed Dec 31 14:05:48 2014
@@ -17,39 +17,15 @@ package org.apache.solr.cloud;
* limitations under the License.
*/
-import javax.management.MBeanServer;
-import javax.management.MBeanServerFactory;
-import javax.management.ObjectName;
-import java.io.File;
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Objects;
-import java.util.Set;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.Future;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
-import org.apache.solr.client.solrj.impl.HttpSolrServer.RemoteSolrException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
@@ -84,6 +60,31 @@ import org.apache.solr.util.DefaultSolrT
import org.junit.Before;
import org.junit.BeforeClass;
+import javax.management.MBeanServer;
+import javax.management.MBeanServerFactory;
+import javax.management.ObjectName;
+import java.io.File;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.Future;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
@@ -213,7 +214,7 @@ public class CollectionsAPIDistributedZk
private void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
// we can use this client because we just want base url
- final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
+ final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
String collectionName = "out_of_sync_collection";
@@ -230,7 +231,7 @@ public class CollectionsAPIDistributedZk
QueryRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
try {
- NamedList<Object> resp = createNewSolrServer("", baseUrl)
+ NamedList<Object> resp = createNewSolrClient("", baseUrl)
.request(request);
fail("Expected to fail, because collection is not in clusterstate");
} catch (RemoteSolrException e) {
@@ -244,7 +245,7 @@ public class CollectionsAPIDistributedZk
}
private void deletePartiallyCreatedCollection() throws Exception {
- final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
+ final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
String collectionName = "halfdeletedcollection";
Create createCmd = new Create();
createCmd.setCoreName("halfdeletedcollection_shard1_replica1");
@@ -255,7 +256,7 @@ public class CollectionsAPIDistributedZk
if (secondConfigSet) {
createCmd.setCollectionConfigName("conf1");
}
- createNewSolrServer("", baseUrl).request(createCmd);
+ createNewSolrClient("", baseUrl).request(createCmd);
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", CollectionAction.DELETE.toString());
@@ -263,7 +264,7 @@ public class CollectionsAPIDistributedZk
QueryRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
- NamedList<Object> resp = createNewSolrServer("", baseUrl).request(request);
+ NamedList<Object> resp = createNewSolrClient("", baseUrl).request(request);
checkForMissingCollection(collectionName);
@@ -277,19 +278,19 @@ public class CollectionsAPIDistributedZk
if (secondConfigSet) {
params.set("collection.configName", "conf1");
}
- resp = createNewSolrServer("", baseUrl).request(request);
+ resp = createNewSolrClient("", baseUrl).request(request);
}
private void deleteCollectionWithDownNodes() throws Exception {
- String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
+ String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
// now try to remove a collection when a couple of its nodes are down
if (secondConfigSet) {
createCollection(null, "halfdeletedcollection2", 3, 3, 6,
- createNewSolrServer("", baseUrl), null, "conf2");
+ createNewSolrClient("", baseUrl), null, "conf2");
} else {
createCollection(null, "halfdeletedcollection2", 3, 3, 6,
- createNewSolrServer("", baseUrl), null);
+ createNewSolrClient("", baseUrl), null);
}
waitForRecoveriesToFinish("halfdeletedcollection2", false);
@@ -303,7 +304,7 @@ public class CollectionsAPIDistributedZk
cloudClient.getZkStateReader().getLeaderRetry("halfdeletedcollection2", "shard" + i, 30000);
}
- baseUrl = getBaseUrl((HttpSolrServer) clients.get(2));
+ baseUrl = getBaseUrl((HttpSolrClient) clients.get(2));
// remove a collection
ModifiableSolrParams params = new ModifiableSolrParams();
@@ -312,7 +313,7 @@ public class CollectionsAPIDistributedZk
QueryRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
- createNewSolrServer("", baseUrl).request(request);
+ createNewSolrClient("", baseUrl).request(request);
long timeout = System.currentTimeMillis() + 10000;
while (cloudClient.getZkStateReader().getClusterState().hasCollection("halfdeletedcollection2")) {
@@ -329,7 +330,7 @@ public class CollectionsAPIDistributedZk
}
private void testErrorHandling() throws Exception {
- final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
+ final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
// try a bad action
@@ -343,7 +344,7 @@ public class CollectionsAPIDistributedZk
boolean gotExp = false;
NamedList<Object> resp = null;
try {
- resp = createNewSolrServer("", baseUrl).request(request);
+ resp = createNewSolrClient("", baseUrl).request(request);
} catch (SolrException e) {
gotExp = true;
}
@@ -365,7 +366,7 @@ public class CollectionsAPIDistributedZk
gotExp = false;
resp = null;
try {
- resp = createNewSolrServer("", baseUrl).request(request);
+ resp = createNewSolrClient("", baseUrl).request(request);
} catch (SolrException e) {
gotExp = true;
}
@@ -385,7 +386,7 @@ public class CollectionsAPIDistributedZk
request.setPath("/admin/collections");
gotExp = false;
try {
- resp = createNewSolrServer("", baseUrl).request(request);
+ resp = createNewSolrClient("", baseUrl).request(request);
} catch (SolrException e) {
gotExp = true;
}
@@ -405,7 +406,7 @@ public class CollectionsAPIDistributedZk
gotExp = false;
resp = null;
try {
- resp = createNewSolrServer("", baseUrl).request(request);
+ resp = createNewSolrClient("", baseUrl).request(request);
} catch (SolrException e) {
gotExp = true;
}
@@ -426,7 +427,7 @@ public class CollectionsAPIDistributedZk
gotExp = false;
resp = null;
try {
- resp = createNewSolrServer("", baseUrl).request(request);
+ resp = createNewSolrClient("", baseUrl).request(request);
} catch (SolrException e) {
gotExp = true;
}
@@ -445,7 +446,7 @@ public class CollectionsAPIDistributedZk
if (secondConfigSet) {
createCmd.setCollectionConfigName("conf1");
}
- createNewSolrServer("", baseUrl).request(createCmd);
+ createNewSolrClient("", baseUrl).request(createCmd);
createCmd = new Create();
createCmd.setCoreName("halfcollection_shard1_replica1");
@@ -456,7 +457,7 @@ public class CollectionsAPIDistributedZk
if (secondConfigSet) {
createCmd.setCollectionConfigName("conf1");
}
- createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(1))).request(createCmd);
+ createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(1))).request(createCmd);
params = new ModifiableSolrParams();
params.set("action", CollectionAction.CREATE.toString());
@@ -476,7 +477,7 @@ public class CollectionsAPIDistributedZk
request = new QueryRequest(params);
request.setPath("/admin/collections");
gotExp = false;
- resp = createNewSolrServer("", baseUrl).request(request);
+ resp = createNewSolrClient("", baseUrl).request(request);
SimpleOrderedMap success = (SimpleOrderedMap) resp.get("success");
SimpleOrderedMap failure = (SimpleOrderedMap) resp.get("failure");
@@ -506,14 +507,14 @@ public class CollectionsAPIDistributedZk
createCmd.setCollectionConfigName("conf1");
}
- createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(1)))
+ createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(1)))
.request(createCmd);
// try and create a SolrCore with no collection name
createCmd.setCollection(null);
createCmd.setCoreName("corewithnocollection2");
- createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(1)))
+ createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(1)))
.request(createCmd);
// in both cases, the collection should have default to the core name
@@ -524,7 +525,7 @@ public class CollectionsAPIDistributedZk
private void testNodesUsedByCreate() throws Exception {
// we can use this client because we just want base url
- final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
+ final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", CollectionAction.CREATE.toString());
@@ -541,7 +542,7 @@ public class CollectionsAPIDistributedZk
QueryRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
- createNewSolrServer("", baseUrl).request(request);
+ createNewSolrClient("", baseUrl).request(request);
List<Integer> numShardsNumReplicaList = new ArrayList<>();
numShardsNumReplicaList.add(2);
@@ -572,7 +573,7 @@ public class CollectionsAPIDistributedZk
private void testCollectionsAPI() throws Exception {
boolean disableLegacy = random().nextBoolean();
- CloudSolrServer client1 = null;
+ CloudSolrClient client1 = null;
if (disableLegacy) {
log.info("legacyCloud=false");
@@ -592,11 +593,11 @@ public class CollectionsAPIDistributedZk
for (int i = 0; i < cnt; i++) {
int numShards = TestUtil.nextInt(random(), 0, shardCount) + 1;
int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
- int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
+ int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
- CloudSolrServer client = null;
+ CloudSolrClient client = null;
try {
if (i == 0) {
// Test if we can create a collection through CloudSolrServer where
@@ -637,7 +638,7 @@ public class CollectionsAPIDistributedZk
String url = getUrlFromZk(collection);
- HttpSolrServer collectionClient = new HttpSolrServer(url);
+ HttpSolrClient collectionClient = new HttpSolrClient(url);
// poll for a second - it can take a moment before we are ready to serve
waitForNon403or404or503(collectionClient);
@@ -657,7 +658,7 @@ public class CollectionsAPIDistributedZk
String url = getUrlFromZk(collection);
- HttpSolrServer collectionClient = new HttpSolrServer(url);
+ HttpSolrClient collectionClient = new HttpSolrClient(url);
// poll for a second - it can take a moment before we are ready to serve
waitForNon403or404or503(collectionClient);
@@ -678,7 +679,7 @@ public class CollectionsAPIDistributedZk
ChaosMonkey.causeConnectionLoss(jetty);
}
- ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
+ ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
for (int j = 0; j < cnt; j++) {
waitForRecoveriesToFinish("awholynewcollection_" + j, zkStateReader, false);
@@ -704,7 +705,7 @@ public class CollectionsAPIDistributedZk
String url = getUrlFromZk(collectionName);
- HttpSolrServer collectionClient = new HttpSolrServer(url);
+ HttpSolrClient collectionClient = new HttpSolrClient(url);
// lets try and use the solrj client to index a couple documents
@@ -740,9 +741,9 @@ public class CollectionsAPIDistributedZk
request.setPath("/admin/collections");
// we can use this client because we just want base url
- final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
+ final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
- createNewSolrServer("", baseUrl).request(request);
+ createNewSolrClient("", baseUrl).request(request);
// reloads make take a short while
boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
@@ -758,7 +759,7 @@ public class CollectionsAPIDistributedZk
request = new QueryRequest(params);
request.setPath("/admin/collections");
- createNewSolrServer("", baseUrl).request(request);
+ createNewSolrClient("", baseUrl).request(request);
// ensure its out of the state
checkForMissingCollection(collectionName);
@@ -774,7 +775,7 @@ public class CollectionsAPIDistributedZk
boolean exp = false;
try {
- createNewSolrServer("", baseUrl).request(request);
+ createNewSolrClient("", baseUrl).request(request);
} catch (SolrException e) {
exp = true;
}
@@ -794,7 +795,7 @@ public class CollectionsAPIDistributedZk
}
request = new QueryRequest(params);
request.setPath("/admin/collections");
- createNewSolrServer("", baseUrl).request(request);
+ createNewSolrClient("", baseUrl).request(request);
List<Integer> list = new ArrayList<>(2);
list.add(1);
@@ -803,7 +804,7 @@ public class CollectionsAPIDistributedZk
url = getUrlFromZk(collectionName);
- collectionClient = new HttpSolrServer(url);
+ collectionClient = new HttpSolrClient(url);
// poll for a second - it can take a moment before we are ready to serve
waitForNon403or404or503(collectionClient);
@@ -815,12 +816,12 @@ public class CollectionsAPIDistributedZk
}
// test maxShardsPerNode
- int numLiveNodes = getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes().size();
+ int numLiveNodes = getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size();
int numShards = (numLiveNodes/2) + 1;
int replicationFactor = 2;
int maxShardsPerNode = 1;
collectionInfos = new HashMap<>();
- CloudSolrServer client = createCloudClient("awholynewcollection_" + cnt);
+ CloudSolrClient client = createCloudClient("awholynewcollection_" + cnt);
try {
exp = false;
try {
@@ -836,12 +837,12 @@ public class CollectionsAPIDistributedZk
// Test createNodeSet
- numLiveNodes = getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes().size();
+ numLiveNodes = getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size();
List<String> createNodeList = new ArrayList<>();
int numOfCreateNodes = numLiveNodes/2;
assertFalse("createNodeSet test is pointless with only " + numLiveNodes + " nodes running", numOfCreateNodes == 0);
int i = 0;
- for (String liveNode : getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes()) {
+ for (String liveNode : getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes()) {
if (i < numOfCreateNodes) {
createNodeList.add(liveNode);
i++;
@@ -888,10 +889,10 @@ public class CollectionsAPIDistributedZk
String collectionName = "awholynewstresscollection_" + name + "_" + i;
int numShards = TestUtil.nextInt(random(), 0, shardCount * 2) + 1;
int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
- int maxShardsPerNode = (((numShards * 2 * replicationFactor) / getCommonCloudSolrServer()
+ int maxShardsPerNode = (((numShards * 2 * replicationFactor) / getCommonCloudSolrClient()
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
- CloudSolrServer client = null;
+ CloudSolrClient client = null;
try {
if (i == 0) {
client = createCloudClient(null);
@@ -993,7 +994,7 @@ public class CollectionsAPIDistributedZk
private void collectStartTimes(String collectionName,
Map<String,Long> urlToTime) throws SolrServerException, IOException {
- ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader()
+ ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader()
.getClusterState();
// Map<String,DocCollection> collections = clusterState.getCollectionStates();
if (clusterState.hasCollection(collectionName)) {
@@ -1008,7 +1009,7 @@ public class CollectionsAPIDistributedZk
while (shardIt.hasNext()) {
Entry<String,Replica> shardEntry = shardIt.next();
ZkCoreNodeProps coreProps = new ZkCoreNodeProps(shardEntry.getValue());
- HttpSolrServer server = new HttpSolrServer(coreProps.getBaseUrl());
+ HttpSolrClient server = new HttpSolrClient(coreProps.getBaseUrl());
CoreAdminResponse mcr;
try {
mcr = CoreAdminRequest.getStatus(coreProps.getCoreName(), server);
@@ -1026,7 +1027,7 @@ public class CollectionsAPIDistributedZk
}
private String getUrlFromZk(String collection) {
- ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader().getClusterState();
+ ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
Map<String,Slice> slices = clusterState.getSlicesMap(collection);
if (slices == null) {
@@ -1122,6 +1123,58 @@ public class CollectionsAPIDistributedZk
}
+  /**
+   * Verifies the ADDREPLICA collection API: first with an explicitly
+   * requested target node (the replica must land on that node), then
+   * letting the Overseer choose the node itself.
+   */
+  private void addReplicaTest() throws Exception {
+    String collectionName = "addReplicaColl";
+    CloudSolrClient client = createCloudClient(null);
+    try {
+      createCollection(collectionName, client, 2, 2);
+      String newReplicaName = Assign.assignNode(collectionName, client.getZkStateReader().getClusterState());
+      ArrayList<String> nodeList = new ArrayList<>(client.getZkStateReader().getClusterState().getLiveNodes());
+      Collections.shuffle(nodeList, random());
+
+      // Pin the new replica to a specific (randomly chosen) live node.
+      CollectionAdminRequest.AddReplica addReplica = new CollectionAdminRequest.AddReplica();
+      addReplica.setCollectionName(collectionName);
+      addReplica.setShardName("shard1");
+      addReplica.setNode(nodeList.get(0));
+      client.request(addReplica);
+
+      Replica newReplica = waitForReplica(client, collectionName, "shard1", newReplicaName);
+
+      assertNotNull(newReplica);
+
+      log.info("newReplica {},\n{} ", newReplica, client.getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)));
+
+      assertEquals("Replica should be created on the right node",
+          client.getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)), newReplica.getStr(ZkStateReader.BASE_URL_PROP));
+
+      // Second ADDREPLICA without setNode(): the Overseer picks the node.
+      newReplicaName = Assign.assignNode(collectionName, client.getZkStateReader().getClusterState());
+      addReplica = new CollectionAdminRequest.AddReplica();
+      addReplica.setCollectionName(collectionName);
+      addReplica.setShardName("shard2");
+      client.request(addReplica);
+
+      newReplica = waitForReplica(client, collectionName, "shard2", newReplicaName);
+
+      assertNotNull(newReplica);
+    } finally {
+      client.shutdown();
+    }
+  }
+
+  /**
+   * Polls the cluster state for up to 3 seconds until the named replica
+   * appears in the given shard of the collection. Returns the replica, or
+   * null if it never shows up before the timeout.
+   *
+   * Unlike the previous inline loops, this returns as soon as the replica
+   * is visible and sleeps between polls instead of busy-spinning for the
+   * whole timeout; it also guards against the slice not existing yet.
+   */
+  private Replica waitForReplica(CloudSolrClient client, String collectionName,
+      String shardName, String replicaName) throws InterruptedException {
+    long timeout = System.currentTimeMillis() + 3000;
+    while (System.currentTimeMillis() < timeout) {
+      Slice slice = client.getZkStateReader().getClusterState().getSlice(collectionName, shardName);
+      Replica replica = (slice == null) ? null : slice.getReplica(replicaName);
+      if (replica != null) {
+        return replica;
+      }
+      Thread.sleep(100); // let the cluster state catch up instead of spinning
+    }
+    return null;
+  }
+
@Override
protected QueryResponse queryServer(ModifiableSolrParams params) throws SolrServerException {
@@ -1131,12 +1184,12 @@ public class CollectionsAPIDistributedZk
if (r.nextBoolean())
params.set("collection",DEFAULT_COLLECTION);
- QueryResponse rsp = getCommonCloudSolrServer().query(params);
+ QueryResponse rsp = getCommonCloudSolrClient().query(params);
return rsp;
}
- protected void createCollection(String COLL_NAME, CloudSolrServer client,int replicationFactor , int numShards ) throws Exception {
- int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrServer()
+ protected void createCollection(String COLL_NAME, CloudSolrClient client,int replicationFactor , int numShards ) throws Exception {
+ int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
Map<String, Object> props = makeMap(
@@ -1160,7 +1213,7 @@ public class CollectionsAPIDistributedZk
}
private void clusterPropTest() throws Exception {
- CloudSolrServer client = createCloudClient(null);
+ CloudSolrClient client = createCloudClient(null);
assertTrue("cluster property not set", setClusterProp(client, ZkStateReader.LEGACY_CLOUD, "false"));
assertTrue("cluster property not unset ", setClusterProp(client, ZkStateReader.LEGACY_CLOUD, null));
@@ -1168,7 +1221,7 @@ public class CollectionsAPIDistributedZk
client.shutdown();
}
- public static boolean setClusterProp(CloudSolrServer client, String name , String val) throws SolrServerException, IOException, InterruptedException {
+ public static boolean setClusterProp(CloudSolrClient client, String name , String val) throws SolrServerException, IOException, InterruptedException {
Map m = makeMap(
"action", CollectionAction.CLUSTERPROP.toLower(),
"name",name);
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTests.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTests.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTests.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTests.java Wed Dec 31 14:05:48 2014
@@ -21,7 +21,7 @@ import org.apache.commons.codec.binary.S
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
@@ -269,7 +269,7 @@ public class CollectionsAPISolrJTests ex
Replica replica1 = testCollection.getReplica("core_node1");
- HttpSolrServer solrServer = new HttpSolrServer(replica1.getStr("base_url"));
+ HttpSolrClient solrServer = new HttpSolrClient(replica1.getStr("base_url"));
try {
CoreAdminResponse status = CoreAdminRequest.getStatus(replica1.getStr("core"), solrServer);
NamedList<Object> coreStatus = status.getCoreStatus(replica1.getStr("core"));
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java Wed Dec 31 14:05:48 2014
@@ -43,8 +43,8 @@ import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
@@ -149,11 +149,11 @@ public class CustomCollectionTest extend
for (int i = 0; i < cnt; i++) {
int numShards = 3;
- int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrServer()
+ int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
- CloudSolrServer client = null;
+ CloudSolrClient client = null;
try {
if (i == 0) {
// Test if we can create a collection through CloudSolrServer where
@@ -193,15 +193,15 @@ public class CustomCollectionTest extend
List<Integer> list = entry.getValue();
checkForCollection(collection, list, null);
- String url = getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collection);
+ String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collection);
- HttpSolrServer collectionClient = new HttpSolrServer(url);
+ HttpSolrClient collectionClient = new HttpSolrClient(url);
// poll for a second - it can take a moment before we are ready to serve
waitForNon403or404or503(collectionClient);
collectionClient.shutdown();
}
- ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
+ ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
for (int j = 0; j < cnt; j++) {
waitForRecoveriesToFinish(COLL_PREFIX + j, zkStateReader, false);
}
@@ -221,9 +221,9 @@ public class CustomCollectionTest extend
String collectionName = collectionNameList.get(random().nextInt(collectionNameList.size()));
- String url = getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
+ String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
- HttpSolrServer collectionClient = new HttpSolrServer(url);
+ HttpSolrClient collectionClient = new HttpSolrClient(url);
// lets try and use the solrj client to index a couple documents
@@ -271,7 +271,7 @@ public class CustomCollectionTest extend
params.set("shard", "x");
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
- createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0))).request(request);
+ createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0))).request(request);
waitForCollection(zkStateReader,collectionName,4);
//wait for all the replicas to become active
int attempts = 0;
@@ -295,11 +295,11 @@ public class CustomCollectionTest extend
int numShards = 4;
replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
- int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
+ int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
- CloudSolrServer client = null;
+ CloudSolrClient client = null;
String shard_fld = "shard_s";
try {
client = createCloudClient(null);
@@ -320,10 +320,10 @@ public class CustomCollectionTest extend
checkForCollection(collectionName, list, null);
- url = getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
+ url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
collectionClient.shutdown();
- collectionClient = new HttpSolrServer(url);
+ collectionClient = new HttpSolrClient(url);
// poll for a second - it can take a moment before we are ready to serve
waitForNon403or404or503(collectionClient);
@@ -331,7 +331,7 @@ public class CustomCollectionTest extend
collectionClient.shutdown();
- collectionClient = new HttpSolrServer(url);
+ collectionClient = new HttpSolrClient(url);
// lets try and use the solrj client to index a couple documents
@@ -358,11 +358,11 @@ public class CustomCollectionTest extend
String collectionName = "routeFieldColl";
int numShards = 4;
int replicationFactor = 2;
- int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
+ int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
- CloudSolrServer client = null;
+ CloudSolrClient client = null;
String shard_fld = "shard_s";
try {
client = createCloudClient(null);
@@ -381,16 +381,16 @@ public class CustomCollectionTest extend
checkForCollection(collectionName, list, null);
- String url = getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
+ String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
- HttpSolrServer collectionClient = new HttpSolrServer(url);
+ HttpSolrClient collectionClient = new HttpSolrClient(url);
// poll for a second - it can take a moment before we are ready to serve
waitForNon403or404or503(collectionClient);
collectionClient.shutdown();
- collectionClient = new HttpSolrServer(url);
+ collectionClient = new HttpSolrClient(url);
// lets try and use the solrj client to index a couple documents
@@ -422,7 +422,7 @@ public class CustomCollectionTest extend
private void testCreateShardRepFactor() throws Exception {
String collectionName = "testCreateShardRepFactor";
HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
- CloudSolrServer client = null;
+ CloudSolrClient client = null;
try {
client = createCloudClient(null);
Map<String, Object> props = ZkNodeProps.makeMap(
@@ -436,7 +436,7 @@ public class CustomCollectionTest extend
} finally {
if (client != null) client.shutdown();
}
- ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
+ ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
waitForRecoveriesToFinish(collectionName, zkStateReader, false);
ModifiableSolrParams params = new ModifiableSolrParams();
@@ -445,7 +445,7 @@ public class CustomCollectionTest extend
params.set("shard", "x");
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
- createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0))).request(request);
+ createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0))).request(request);
waitForRecoveriesToFinish(collectionName, zkStateReader, false);
@@ -473,7 +473,7 @@ public class CustomCollectionTest extend
if (r.nextBoolean())
params.set("collection",DEFAULT_COLLECTION);
- QueryResponse rsp = getCommonCloudSolrServer().query(params);
+ QueryResponse rsp = getCommonCloudSolrClient().query(params);
return rsp;
}
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteInactiveReplicaTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteInactiveReplicaTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteInactiveReplicaTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteInactiveReplicaTest.java Wed Dec 31 14:05:48 2014
@@ -17,16 +17,10 @@ package org.apache.solr.cloud;
* limitations under the License.
*/
-import static org.apache.solr.cloud.CollectionsAPIDistributedZkTest.setClusterProp;
-import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
-
-import java.net.URL;
-import java.util.Map;
-
-import org.apache.solr.client.solrj.SolrServer;
+import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
@@ -38,6 +32,12 @@ import org.apache.solr.common.util.Named
import org.junit.After;
import org.junit.Before;
+import java.net.URL;
+import java.util.Map;
+
+import static org.apache.solr.cloud.CollectionsAPIDistributedZkTest.setClusterProp;
+import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
+
//@Ignore("Not currently valid see SOLR-5580")
public class DeleteInactiveReplicaTest extends DeleteReplicaTest{
@@ -57,7 +57,7 @@ public class DeleteInactiveReplicaTest e
}
private void deleteInactiveReplicaTest() throws Exception {
- CloudSolrServer client = createCloudClient(null);
+ CloudSolrClient client = createCloudClient(null);
String collectionName = "delDeadColl";
@@ -131,12 +131,12 @@ public class DeleteInactiveReplicaTest e
Map m = makeMap("qt", "/admin/cores", "action", "status");
- SolrServer server = new HttpSolrServer(replica1.getStr(ZkStateReader.BASE_URL_PROP));
- NamedList<Object> resp = server.request(new QueryRequest(new MapSolrParams(m)));
+ SolrClient queryClient = new HttpSolrClient(replica1.getStr(ZkStateReader.BASE_URL_PROP));
+ NamedList<Object> resp = queryClient.request(new QueryRequest(new MapSolrParams(m)));
assertNull("The core is up and running again",
((NamedList) resp.get("status")).get(replica1.getStr("core")));
- server.shutdown();
- server = null;
+ queryClient.shutdown();
+ queryClient = null;
Exception exp = null;
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java Wed Dec 31 14:05:48 2014
@@ -19,13 +19,12 @@ package org.apache.solr.cloud;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.ImplicitDocRouter;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.MapSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.junit.After;
@@ -47,7 +46,7 @@ import static org.apache.solr.common.par
@Ignore("SOLR-6347")
public class DeleteLastCustomShardedReplicaTest extends AbstractFullDistribZkTestBase {
- private CloudSolrServer client;
+ private CloudSolrClient client;
@BeforeClass
public static void beforeThisClass2() throws Exception {
@@ -102,7 +101,7 @@ public class DeleteLastCustomShardedRepl
waitForRecoveriesToFinish(collectionName, false);
- DocCollection testcoll = getCommonCloudSolrServer().getZkStateReader()
+ DocCollection testcoll = getCommonCloudSolrClient().getZkStateReader()
.getClusterState().getCollection(collectionName);
Replica replica = testcoll.getSlice("a").getReplicas().iterator().next();
@@ -121,7 +120,7 @@ public class DeleteLastCustomShardedRepl
boolean success = false;
DocCollection testcoll = null;
while (System.currentTimeMillis() < endAt) {
- testcoll = getCommonCloudSolrServer().getZkStateReader()
+ testcoll = getCommonCloudSolrClient().getZkStateReader()
.getClusterState().getCollection(COLL_NAME);
// In case of a custom sharded collection, the last replica deletion would also lead to
// the deletion of the slice.
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java Wed Dec 31 14:05:48 2014
@@ -17,22 +17,10 @@ package org.apache.solr.cloud;
* limitations under the License.
*/
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionProcessor.ONLY_IF_DOWN;
-import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.response.CoreAdminResponse;
@@ -48,8 +36,20 @@ import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
+import static org.apache.solr.cloud.OverseerCollectionProcessor.ONLY_IF_DOWN;
+import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
+import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
+
public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
- private CloudSolrServer client;
+ private CloudSolrClient client;
@BeforeClass
public static void beforeThisClass2() throws Exception {
@@ -91,13 +91,13 @@ public class DeleteReplicaTest extends A
private void deleteLiveReplicaTest() throws Exception {
String collectionName = "delLiveColl";
- CloudSolrServer client = createCloudClient(null);
+ CloudSolrClient client = createCloudClient(null);
try {
createCollection(collectionName, client);
waitForRecoveriesToFinish(collectionName, false);
- DocCollection testcoll = getCommonCloudSolrServer().getZkStateReader()
+ DocCollection testcoll = getCommonCloudSolrClient().getZkStateReader()
.getClusterState().getCollection(collectionName);
Slice shard1 = null;
@@ -120,14 +120,14 @@ public class DeleteReplicaTest extends A
if (replica1 == null) fail("no active replicas found");
- HttpSolrServer replica1Server = new HttpSolrServer(replica1.getStr("base_url"));
+ HttpSolrClient replica1Client = new HttpSolrClient(replica1.getStr("base_url"));
String dataDir = null;
try {
- CoreAdminResponse status = CoreAdminRequest.getStatus(replica1.getStr("core"), replica1Server);
+ CoreAdminResponse status = CoreAdminRequest.getStatus(replica1.getStr("core"), replica1Client);
NamedList<Object> coreStatus = status.getCoreStatus(replica1.getStr("core"));
dataDir = (String) coreStatus.get("dataDir");
} finally {
- replica1Server.shutdown();
+ replica1Client.shutdown();
}
try {
// Should not be able to delete a replica that is up if onlyIfDown=true.
@@ -149,7 +149,7 @@ public class DeleteReplicaTest extends A
}
}
- protected void tryToRemoveOnlyIfDown(String collectionName, CloudSolrServer client, Replica replica, String shard) throws IOException, SolrServerException {
+ protected void tryToRemoveOnlyIfDown(String collectionName, CloudSolrClient client, Replica replica, String shard) throws IOException, SolrServerException {
Map m = makeMap("collection", collectionName,
"action", DELETEREPLICA.toLower(),
"shard", shard,
@@ -162,7 +162,7 @@ public class DeleteReplicaTest extends A
}
protected void removeAndWaitForReplicaGone(String COLL_NAME,
- CloudSolrServer client, Replica replica, String shard)
+ CloudSolrClient client, Replica replica, String shard)
throws SolrServerException, IOException, InterruptedException {
Map m = makeMap("collection", COLL_NAME, "action", DELETEREPLICA.toLower(), "shard",
shard, "replica", replica.getName());
@@ -174,7 +174,7 @@ public class DeleteReplicaTest extends A
boolean success = false;
DocCollection testcoll = null;
while (System.currentTimeMillis() < endAt) {
- testcoll = getCommonCloudSolrServer().getZkStateReader()
+ testcoll = getCommonCloudSolrClient().getZkStateReader()
.getClusterState().getCollection(COLL_NAME);
success = testcoll.getSlice(shard).getReplica(replica.getName()) == null;
if (success) {
@@ -188,10 +188,10 @@ public class DeleteReplicaTest extends A
assertTrue("Replica not cleaned up", success);
}
- protected void createCollection(String COLL_NAME, CloudSolrServer client) throws Exception {
+ protected void createCollection(String COLL_NAME, CloudSolrClient client) throws Exception {
int replicationFactor = 2;
int numShards = 2;
- int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrServer()
+ int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
Map<String, Object> props = makeMap(
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java Wed Dec 31 14:05:48 2014
@@ -19,7 +19,7 @@ package org.apache.solr.cloud;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.common.SolrException;
@@ -94,7 +94,7 @@ public class DeleteShardTest extends Abs
try {
deleteShard(SHARD1);
fail("Deleting an active shard should not have succeeded");
- } catch (HttpSolrServer.RemoteSolrException e) {
+ } catch (HttpSolrClient.RemoteSolrException e) {
// expected
}
@@ -143,15 +143,15 @@ public class DeleteShardTest extends Abs
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
- String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
+ String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
.getBaseURL();
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
- HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
- baseServer.setConnectionTimeout(15000);
- baseServer.setSoTimeout(60000);
- baseServer.request(request);
- baseServer.shutdown();
+ HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
+ baseClient.setConnectionTimeout(15000);
+ baseClient.setSoTimeout(60000);
+ baseClient.request(request);
+ baseClient.shutdown();
}
protected void setSliceState(String slice, String state) throws SolrServerException, IOException,
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DistribCursorPagingTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DistribCursorPagingTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DistribCursorPagingTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DistribCursorPagingTest.java Wed Dec 31 14:05:48 2014
@@ -18,10 +18,8 @@ package org.apache.solr.cloud;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.lucene.util.TestUtil;
-import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.SentinelIntSet;
import org.apache.solr.CursorPagingTest;
-import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.request.LukeRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
@@ -636,7 +634,7 @@ public class DistribCursorPagingTest ext
/**
* Given a QueryResponse returned by SolrServer.query, asserts that the
* numFound on the doc list matches the expectation
- * @see SolrServer#query
+ * @see org.apache.solr.client.solrj.SolrClient#query
*/
private void assertNumFound(int expected, QueryResponse rsp) {
assertEquals(expected, extractDocList(rsp).getNumFound());
@@ -645,7 +643,7 @@ public class DistribCursorPagingTest ext
/**
* Given a QueryResponse returned by SolrServer.query, asserts that the
* start on the doc list matches the expectation
- * @see SolrServer#query
+ * @see org.apache.solr.client.solrj.SolrClient#query
*/
private void assertStartsAt(int expected, QueryResponse rsp) {
assertEquals(expected, extractDocList(rsp).getStart());
@@ -654,7 +652,7 @@ public class DistribCursorPagingTest ext
/**
* Given a QueryResponse returned by SolrServer.query, asserts that the
* "id" of the list of documents returned matches the expected list
- * @see SolrServer#query
+ * @see org.apache.solr.client.solrj.SolrClient#query
*/
private void assertDocList(QueryResponse rsp, Object... ids) {
SolrDocumentList docs = extractDocList(rsp);
@@ -669,7 +667,7 @@ public class DistribCursorPagingTest ext
/**
* Given a QueryResponse returned by SolrServer.query, asserts that the
* response does include {@link #CURSOR_MARK_NEXT} key and returns it
- * @see SolrServer#query
+ * @see org.apache.solr.client.solrj.SolrClient#query
*/
private String assertHashNextCursorMark(QueryResponse rsp) {
String r = rsp.getNextCursorMark();
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DistribDocExpirationUpdateProcessorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DistribDocExpirationUpdateProcessorTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DistribDocExpirationUpdateProcessorTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/DistribDocExpirationUpdateProcessorTest.java Wed Dec 31 14:05:48 2014
@@ -17,16 +17,8 @@
package org.apache.solr.cloud;
import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.params.ModifiableSolrParams;
@@ -39,8 +31,6 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
-import java.util.ArrayList;
-import java.util.Collection;
import java.util.Map;
import java.util.Set;
import java.util.HashSet;
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ExternalCollectionsTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ExternalCollectionsTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ExternalCollectionsTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ExternalCollectionsTest.java Wed Dec 31 14:05:48 2014
@@ -17,7 +17,7 @@ package org.apache.solr.cloud;
* limitations under the License.
*/
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
@@ -30,7 +30,7 @@ import org.junit.Before;
import org.junit.BeforeClass;
public class ExternalCollectionsTest extends AbstractFullDistribZkTestBase {
- private CloudSolrServer client;
+ private CloudSolrClient client;
@BeforeClass
public static void beforeThisClass2() throws Exception {
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java Wed Dec 31 14:05:48 2014
@@ -17,18 +17,13 @@ package org.apache.solr.cloud;
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.lucene.util.LuceneTestCase.BadApple;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
+import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServer;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
@@ -38,13 +33,17 @@ import org.apache.solr.common.SolrInputD
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.update.VersionInfo;
import org.apache.solr.update.processor.DistributedUpdateProcessor;
import org.apache.zookeeper.CreateMode;
import org.junit.BeforeClass;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
/**
* Super basic testing, no shard restarting or anything.
*/
@@ -130,9 +129,9 @@ public class FullSolrCloudDistribCmdsTes
docId = testIndexQueryDeleteHierarchical(docId);
- docId = testIndexingDocPerRequestWithHttpSolrServer(docId);
+ docId = testIndexingDocPerRequestWithHttpSolrClient(docId);
- testIndexingWithSuss(docId);
+ testConcurrentIndexing(docId);
// TODO: testOptimisticUpdate(results);
@@ -141,7 +140,7 @@ public class FullSolrCloudDistribCmdsTes
docId = testThatCantForwardToLeaderFails(docId);
- docId = testIndexingBatchPerRequestWithHttpSolrServer(docId);
+ docId = testIndexingBatchPerRequestWithHttpSolrClient(docId);
}
private long testThatCantForwardToLeaderFails(long docId) throws Exception {
@@ -316,7 +315,7 @@ public class FullSolrCloudDistribCmdsTes
}
- private long testIndexingDocPerRequestWithHttpSolrServer(long docId) throws Exception {
+ private long testIndexingDocPerRequestWithHttpSolrClient(long docId) throws Exception {
int docs = random().nextInt(TEST_NIGHTLY ? 4013 : 97) + 1;
for (int i = 0; i < docs; i++) {
UpdateRequest uReq;
@@ -335,7 +334,7 @@ public class FullSolrCloudDistribCmdsTes
return docId++;
}
- private long testIndexingBatchPerRequestWithHttpSolrServer(long docId) throws Exception {
+ private long testIndexingBatchPerRequestWithHttpSolrClient(long docId) throws Exception {
// remove collection
ModifiableSolrParams params = new ModifiableSolrParams();
@@ -432,25 +431,25 @@ public class FullSolrCloudDistribCmdsTes
return -1;
}
- private long testIndexingWithSuss(long docId) throws Exception {
- ConcurrentUpdateSolrServer suss = new ConcurrentUpdateSolrServer(
- ((HttpSolrServer) clients.get(0)).getBaseURL(), 10, 2);
+ private long testConcurrentIndexing(long docId) throws Exception {
+ ConcurrentUpdateSolrClient concurrentClient = new ConcurrentUpdateSolrClient(
+ ((HttpSolrClient) clients.get(0)).getBaseURL(), 10, 2);
QueryResponse results = query(cloudClient);
long beforeCount = results.getResults().getNumFound();
int cnt = TEST_NIGHTLY ? 2933 : 313;
try {
- suss.setConnectionTimeout(120000);
+ concurrentClient.setConnectionTimeout(120000);
for (int i = 0; i < cnt; i++) {
- index_specific(suss, id, docId++, "text_t", "some text so that it not's negligent work to parse this doc, even though it's still a pretty short doc");
+ index_specific(concurrentClient, id, docId++, "text_t", "some text so that it not's negligent work to parse this doc, even though it's still a pretty short doc");
}
- suss.blockUntilFinished();
+ concurrentClient.blockUntilFinished();
commit();
checkShardConsistency();
assertDocCounts(VERBOSE);
} finally {
- suss.shutdown();
+ concurrentClient.shutdown();
}
results = query(cloudClient);
assertEquals(beforeCount + cnt, results.getResults().getNumFound());
@@ -497,9 +496,9 @@ public class FullSolrCloudDistribCmdsTes
assertEquals(1, res.getResults().getNumFound());
}
- private QueryResponse query(SolrServer server) throws SolrServerException {
+ private QueryResponse query(SolrClient client) throws SolrServerException {
SolrQuery query = new SolrQuery("*:*");
- return server.query(query);
+ return client.query(query);
}
@Override
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java Wed Dec 31 14:05:48 2014
@@ -17,22 +17,11 @@ package org.apache.solr.cloud;
* limitations under the License.
*/
-import java.io.File;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.JSONTestUtil;
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
@@ -52,6 +41,17 @@ import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.File;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
/**
* Simulates HTTP partitions between a leader and replica but the replica does
* not lose its ZooKeeper connection.
@@ -341,7 +341,7 @@ public class HttpPartitionTest extends A
testCollectionName+"; clusterState: "+printClusterStateInfo(testCollectionName), leader);
JettySolrRunner leaderJetty = getJettyOnPort(getReplicaPort(leader));
- HttpSolrServer leaderSolr = getHttpSolrServer(leader, testCollectionName);
+ HttpSolrClient leaderSolr = getHttpSolrClient(leader, testCollectionName);
SolrInputDocument doc = new SolrInputDocument();
doc.addField(id, String.valueOf(2));
doc.addField("a_t", "hello" + 2);
@@ -377,7 +377,7 @@ public class HttpPartitionTest extends A
leaderSolr.shutdown();
// if the add worked, then the doc must exist on the new leader
- HttpSolrServer newLeaderSolr = getHttpSolrServer(currentLeader, testCollectionName);
+ HttpSolrClient newLeaderSolr = getHttpSolrClient(currentLeader, testCollectionName);
try {
assertDocExists(newLeaderSolr, testCollectionName, "2");
} finally {
@@ -386,7 +386,7 @@ public class HttpPartitionTest extends A
} catch (SolrException exc) {
// this is ok provided the doc doesn't exist on the current leader
- leaderSolr = getHttpSolrServer(currentLeader, testCollectionName);
+ leaderSolr = getHttpSolrClient(currentLeader, testCollectionName);
try {
leaderSolr.add(doc); // this should work
} finally {
@@ -439,18 +439,18 @@ public class HttpPartitionTest extends A
throws Exception {
Replica leader =
cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1", 10000);
- HttpSolrServer leaderSolr = getHttpSolrServer(leader, testCollectionName);
- List<HttpSolrServer> replicas =
- new ArrayList<HttpSolrServer>(notLeaders.size());
+ HttpSolrClient leaderSolr = getHttpSolrClient(leader, testCollectionName);
+ List<HttpSolrClient> replicas =
+ new ArrayList<HttpSolrClient>(notLeaders.size());
for (Replica r : notLeaders) {
- replicas.add(getHttpSolrServer(r, testCollectionName));
+ replicas.add(getHttpSolrClient(r, testCollectionName));
}
try {
for (int d = firstDocId; d <= lastDocId; d++) {
String docId = String.valueOf(d);
assertDocExists(leaderSolr, testCollectionName, docId);
- for (HttpSolrServer replicaSolr : replicas) {
+ for (HttpSolrClient replicaSolr : replicas) {
assertDocExists(replicaSolr, testCollectionName, docId);
}
}
@@ -458,16 +458,16 @@ public class HttpPartitionTest extends A
if (leaderSolr != null) {
leaderSolr.shutdown();
}
- for (HttpSolrServer replicaSolr : replicas) {
+ for (HttpSolrClient replicaSolr : replicas) {
replicaSolr.shutdown();
}
}
}
- protected HttpSolrServer getHttpSolrServer(Replica replica, String coll) throws Exception {
+ protected HttpSolrClient getHttpSolrClient(Replica replica, String coll) throws Exception {
ZkCoreNodeProps zkProps = new ZkCoreNodeProps(replica);
String url = zkProps.getBaseUrl() + "/" + coll;
- return new HttpSolrServer(url);
+ return new HttpSolrClient(url);
}
protected void sendDoc(int docId) throws Exception {
@@ -486,7 +486,7 @@ public class HttpPartitionTest extends A
* exists in the provided server, using distrib=false so it doesn't route to another replica.
*/
@SuppressWarnings("rawtypes")
- protected void assertDocExists(HttpSolrServer solr, String coll, String docId) throws Exception {
+ protected void assertDocExists(HttpSolrClient solr, String coll, String docId) throws Exception {
QueryRequest qr = new QueryRequest(params("qt", "/get", "id", docId, "distrib", "false"));
NamedList rsp = solr.request(qr);
String match = JSONTestUtil.matchObj("/id", rsp.get("doc"), new Integer(docId));
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/LeaderFailoverAfterPartitionTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/LeaderFailoverAfterPartitionTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/LeaderFailoverAfterPartitionTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/LeaderFailoverAfterPartitionTest.java Wed Dec 31 14:05:48 2014
@@ -110,8 +110,8 @@ public class LeaderFailoverAfterPartitio
// doc should be on leader and 1 replica
sendDoc(5);
- assertDocExists(getHttpSolrServer(leader, testCollectionName), testCollectionName, "5");
- assertDocExists(getHttpSolrServer(notLeaders.get(1), testCollectionName), testCollectionName, "5");
+ assertDocExists(getHttpSolrClient(leader, testCollectionName), testCollectionName, "5");
+ assertDocExists(getHttpSolrClient(notLeaders.get(1), testCollectionName), testCollectionName, "5");
Thread.sleep(sleepMsBeforeHealPartition);
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java Wed Dec 31 14:05:48 2014
@@ -17,17 +17,17 @@ package org.apache.solr.cloud;
* limitations under the License.
*/
-import java.io.File;
-import java.util.List;
-
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.junit.After;
import org.junit.Before;
+import java.io.File;
+import java.util.List;
+
public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest {
private static final long sleepMsBeforeHealPartition = 2000L;
@@ -91,8 +91,8 @@ public class LeaderInitiatedRecoveryOnCo
// let's find the leader of shard2 and ask him to commit
Replica shard2Leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard2");
- HttpSolrServer server = new HttpSolrServer(ZkCoreNodeProps.getCoreUrl(shard2Leader.getStr("base_url"), shard2Leader.getStr("core")));
- server.commit();
+ HttpSolrClient client = new HttpSolrClient(ZkCoreNodeProps.getCoreUrl(shard2Leader.getStr("base_url"), shard2Leader.getStr("core")));
+ client.commit();
Thread.sleep(sleepMsBeforeHealPartition);
@@ -133,8 +133,8 @@ public class LeaderInitiatedRecoveryOnCo
leaderProxy.close();
Replica replica = notLeaders.get(0);
- HttpSolrServer server = new HttpSolrServer(ZkCoreNodeProps.getCoreUrl(replica.getStr("base_url"), replica.getStr("core")));
- server.commit();
+ HttpSolrClient client = new HttpSolrClient(ZkCoreNodeProps.getCoreUrl(replica.getStr("base_url"), replica.getStr("core")));
+ client.commit();
Thread.sleep(sleepMsBeforeHealPartition);
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java?rev=1648697&r1=1648696&r2=1648697&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java Wed Dec 31 14:05:48 2014
@@ -20,8 +20,8 @@ package org.apache.solr.cloud;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrServer;
-import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
@@ -30,7 +30,6 @@ import org.apache.solr.common.cloud.Clus
import org.apache.solr.common.cloud.RoutingRule;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.update.DirectUpdateHandler2;
import org.apache.zookeeper.KeeperException;
@@ -43,8 +42,8 @@ import java.util.List;
import java.util.Map;
import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
public class MigrateRouteKeyTest extends BasicDistributedZkTest {
@@ -103,8 +102,8 @@ public class MigrateRouteKeyTest extends
ClusterState state;Slice slice;
boolean ruleRemoved = false;
while (System.currentTimeMillis() - finishTime < 60000) {
- getCommonCloudSolrServer().getZkStateReader().updateClusterState(true);
- state = getCommonCloudSolrServer().getZkStateReader().getClusterState();
+ getCommonCloudSolrClient().getZkStateReader().updateClusterState(true);
+ state = getCommonCloudSolrClient().getZkStateReader().getClusterState();
slice = state.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2);
Map<String,RoutingRule> routingRules = slice.getRoutingRules();
if (routingRules == null || routingRules.isEmpty() || !routingRules.containsKey(splitKey)) {
@@ -133,20 +132,20 @@ public class MigrateRouteKeyTest extends
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
- String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
+ String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
.getBaseURL();
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
- HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
- baseServer.setConnectionTimeout(15000);
- baseServer.setSoTimeout(60000 * 5);
- baseServer.request(request);
- baseServer.shutdown();
+ HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
+ baseClient.setConnectionTimeout(15000);
+ baseClient.setSoTimeout(60000 * 5);
+ baseClient.request(request);
+ baseClient.shutdown();
}
private void createCollection(String targetCollection) throws Exception {
HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
- CloudSolrServer client = null;
+ CloudSolrClient client = null;
try {
client = createCloudClient(null);
Map<String, Object> props = ZkNodeProps.makeMap(
@@ -193,8 +192,8 @@ public class MigrateRouteKeyTest extends
Indexer indexer = new Indexer(cloudClient, splitKey, 1, 30);
indexer.start();
- String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), targetCollection);
- HttpSolrServer collectionClient = new HttpSolrServer(url);
+ String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), targetCollection);
+ HttpSolrClient collectionClient = new HttpSolrClient(url);
SolrQuery solrQuery = new SolrQuery("*:*");
assertEquals("DocCount on target collection does not match", 0, collectionClient.query(solrQuery).getResults().getNumFound());
@@ -221,8 +220,8 @@ public class MigrateRouteKeyTest extends
collectionClient.shutdown();
collectionClient = null;
- getCommonCloudSolrServer().getZkStateReader().updateClusterState(true);
- ClusterState state = getCommonCloudSolrServer().getZkStateReader().getClusterState();
+ getCommonCloudSolrClient().getZkStateReader().updateClusterState(true);
+ ClusterState state = getCommonCloudSolrClient().getZkStateReader().getClusterState();
Slice slice = state.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2);
assertNotNull("Routing rule map is null", slice.getRoutingRules());
assertFalse("Routing rule map is empty", slice.getRoutingRules().isEmpty());
@@ -234,12 +233,12 @@ public class MigrateRouteKeyTest extends
static class Indexer extends Thread {
final int seconds;
- final CloudSolrServer cloudClient;
+ final CloudSolrClient cloudClient;
final String splitKey;
int splitKeyCount = 0;
final int bitSep;
- public Indexer(CloudSolrServer cloudClient, String splitKey, int bitSep, int seconds) {
+ public Indexer(CloudSolrClient cloudClient, String splitKey, int bitSep, int seconds) {
this.seconds = seconds;
this.cloudClient = cloudClient;
this.splitKey = splitKey;