You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ro...@apache.org on 2016/12/29 12:23:03 UTC
[2/6] lucene-solr:branch_6x: SOLR-9132: Cut over some more tests
SOLR-9132: Cut over some more tests
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/183f9980
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/183f9980
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/183f9980
Branch: refs/heads/branch_6x
Commit: 183f998098b0764117d2f858df9909a4ee139cc0
Parents: 529c60d
Author: Alan Woodward <ro...@apache.org>
Authored: Thu Oct 27 16:50:28 2016 +0100
Committer: Alan Woodward <ro...@apache.org>
Committed: Thu Dec 29 11:57:16 2016 +0000
----------------------------------------------------------------------
.../java/org/apache/solr/cloud/Overseer.java | 2 +-
.../solr/cloud/CollectionsAPISolrJTest.java | 470 +++++++------------
.../DeleteLastCustomShardedReplicaTest.java | 104 +---
.../org/apache/solr/cloud/DeleteShardTest.java | 211 +++------
.../cloud/OverseerModifyCollectionTest.java | 92 ++--
.../apache/solr/cloud/OverseerRolesTest.java | 165 +++----
.../apache/solr/cloud/OverseerStatusTest.java | 55 +--
.../apache/solr/cloud/RemoteQueryErrorTest.java | 53 +--
.../solr/cloud/TestDownShardTolerantSearch.java | 40 +-
.../TestExclusionRuleCollectionAccess.java | 38 +-
.../PKIAuthenticationIntegrationTest.java | 40 +-
.../solrj/request/CollectionAdminRequest.java | 2 +
12 files changed, 457 insertions(+), 815 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183f9980/solr/core/src/java/org/apache/solr/cloud/Overseer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index d7285fa..a618874 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -371,7 +371,7 @@ public class Overseer implements Closeable {
return Collections.singletonList(new SliceMutator(getZkStateReader()).updateShardState(clusterState, message));
case QUIT:
if (myId.equals(message.get("id"))) {
- log.info("Quit command received {}", LeaderElector.getNodeName(myId));
+ log.info("Quit command received {} {}", message, LeaderElector.getNodeName(myId));
overseerCollectionConfigSetProcessor.close();
close();
} else {
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183f9980/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
index b04bfbc..616b657 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
@@ -16,70 +16,47 @@
*/
package org.apache.solr.cloud;
-import java.io.File;
import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
import java.util.ArrayList;
-import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-import org.apache.commons.codec.binary.StringUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.CoreStatus;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ClusterProperties;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.util.NamedList;
-import org.apache.solr.util.TimeOut;
import org.apache.zookeeper.KeeperException;
+import org.junit.BeforeClass;
import org.junit.Test;
-import static org.apache.solr.cloud.ReplicaPropertiesBase.verifyUniqueAcrossCollection;
-
@LuceneTestCase.Slow
-public class CollectionsAPISolrJTest extends AbstractFullDistribZkTestBase {
+public class CollectionsAPISolrJTest extends SolrCloudTestCase {
- @Test
- public void test() throws Exception {
- testCreateAndDeleteCollection();
- testCreateAndDeleteShard();
- testReloadCollection();
- testCreateAndDeleteAlias();
- testSplitShard();
- testCreateCollectionWithPropertyParam();
- testAddAndDeleteReplica();
- testClusterProp();
- testAddAndRemoveRole();
- testOverseerStatus();
- testList();
- testAddAndDeleteReplicaProp();
- testBalanceShardUnique();
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(4)
+ .addConfig("conf", configset("cloud-minimal"))
+ .configure();
}
- protected void testCreateAndDeleteCollection() throws Exception {
+ @Test
+ public void testCreateAndDeleteCollection() throws Exception {
String collectionName = "solrj_test";
- CollectionAdminRequest.Create createCollectionRequest = new CollectionAdminRequest.Create()
- .setCollectionName(collectionName)
- .setNumShards(2)
- .setReplicationFactor(2)
- .setConfigName("conf1")
- .setRouterField("myOwnField")
- .setStateFormat(1);
-
- CollectionAdminResponse response = createCollectionRequest.process(cloudClient);
+ CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
+ .setStateFormat(1)
+ .process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
@@ -91,57 +68,44 @@ public class CollectionsAPISolrJTest extends AbstractFullDistribZkTestBase {
assertTrue(status.get("QTime") > 0);
}
- cloudClient.setDefaultCollection(collectionName);
- CollectionAdminRequest.Delete deleteCollectionRequest = new CollectionAdminRequest.Delete()
- .setCollectionName(collectionName);
- response = deleteCollectionRequest.process(cloudClient);
+ response = CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
Map<String,NamedList<Integer>> nodesStatus = response.getCollectionNodesStatus();
- assertNull("Deleted collection " + collectionName + "still exists",
- cloudClient.getZkStateReader().getClusterState().getCollectionOrNull(collectionName));
assertEquals(4, nodesStatus.size());
-
+
+ waitForState("Expected " + collectionName + " to disappear from cluster state", collectionName, (n, c) -> c == null);
+
// Test Creating a collection with new stateformat.
collectionName = "solrj_newstateformat";
- createCollectionRequest = new CollectionAdminRequest.Create()
- .setCollectionName(collectionName)
- .setNumShards(2)
- .setConfigName("conf1")
- .setStateFormat(2);
- response = createCollectionRequest.process(cloudClient);
+ response = CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
+ .setStateFormat(2)
+ .process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
- waitForRecoveriesToFinish(collectionName, false);
- assertTrue("Collection state does not exist",
- cloudClient.getZkStateReader().getZkClient()
- .exists(ZkStateReader.getCollectionPath(collectionName), true));
+ waitForState("Expected " + collectionName + " to appear in cluster state", collectionName, (n, c) -> c != null);
}
-
- protected void testCreateAndDeleteShard() throws IOException, SolrServerException {
+
+ @Test
+ public void testCreateAndDeleteShard() throws IOException, SolrServerException {
+
// Create an implicit collection
String collectionName = "solrj_implicit";
- CollectionAdminResponse response = new CollectionAdminRequest.Create()
- .setCollectionName(collectionName)
- .setShards("shardA,shardB")
- .setConfigName("conf1")
- .setRouterName("implicit").process(cloudClient);
+ CollectionAdminResponse response
+ = CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardA,shardB", 1)
+ .process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
Map<String, NamedList<Integer>> coresStatus = response.getCollectionCoresStatus();
assertEquals(2, coresStatus.size());
- cloudClient.setDefaultCollection(collectionName);
// Add a shard to the implicit collection
- response = new CollectionAdminRequest
- .CreateShard()
- .setCollectionName(collectionName)
- .setShardName("shardC").process(cloudClient);
+ response = CollectionAdminRequest.createShard(collectionName, "shardC").process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
@@ -149,57 +113,38 @@ public class CollectionsAPISolrJTest extends AbstractFullDistribZkTestBase {
assertEquals(1, coresStatus.size());
assertEquals(0, (int) coresStatus.get(collectionName + "_shardC_replica1").get("status"));
- CollectionAdminRequest.DeleteShard deleteShardRequest = new CollectionAdminRequest
- .DeleteShard()
- .setCollectionName(collectionName)
- .setShardName("shardC");
- response = deleteShardRequest.process(cloudClient);
+ response = CollectionAdminRequest.deleteShard(collectionName, "shardC").process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
Map<String, NamedList<Integer>> nodesStatus = response.getCollectionNodesStatus();
assertEquals(1, nodesStatus.size());
}
-
- protected void testReloadCollection() throws IOException, SolrServerException {
- cloudClient.setDefaultCollection(DEFAULT_COLLECTION);
- CollectionAdminRequest.Reload reloadCollectionRequest = new CollectionAdminRequest.Reload()
- .setCollectionName("collection1");
- CollectionAdminResponse response = reloadCollectionRequest.process(cloudClient);
- assertEquals(0, response.getStatus());
- }
-
- protected void testCreateAndDeleteAlias() throws IOException, SolrServerException {
- CollectionAdminRequest.CreateAlias createAliasRequest = new CollectionAdminRequest
- .CreateAlias()
- .setAliasName("solrj_alias")
- .setAliasedCollections(DEFAULT_COLLECTION);
- CollectionAdminResponse response = createAliasRequest.process(cloudClient);
+ @Test
+ public void testCreateAndDeleteAlias() throws IOException, SolrServerException {
+
+ final String collection = "aliasedCollection";
+ CollectionAdminRequest.createCollection(collection, "conf", 1, 1).process(cluster.getSolrClient());
+ CollectionAdminResponse response
+ = CollectionAdminRequest.createAlias("solrj_alias", collection).process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
- CollectionAdminRequest.DeleteAlias deleteAliasRequest = new CollectionAdminRequest.DeleteAlias()
- .setAliasName("solrj_alias");
- deleteAliasRequest.process(cloudClient);
-
+ response = CollectionAdminRequest.deleteAlias("solrj_alias").process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
}
-
- protected void testSplitShard() throws Exception {
- String collectionName = "solrj_test_splitshard";
- cloudClient.setDefaultCollection(collectionName);
-
- CollectionAdminRequest.Create createCollectionRequest = new CollectionAdminRequest.Create()
- .setConfigName("conf1")
- .setNumShards(2)
- .setCollectionName(collectionName);
- createCollectionRequest.process(cloudClient);
-
- CollectionAdminRequest.SplitShard splitShardRequest = new CollectionAdminRequest.SplitShard()
- .setCollectionName(collectionName)
- .setShardName("shard1");
- CollectionAdminResponse response = splitShardRequest.process(cloudClient);
+
+ @Test
+ public void testSplitShard() throws Exception {
+
+ final String collectionName = "solrj_test_splitshard";
+ CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+ .process(cluster.getSolrClient());
+
+ CollectionAdminResponse response = CollectionAdminRequest.splitShard(collectionName)
+ .setShardName("shard1")
+ .process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
@@ -207,267 +152,204 @@ public class CollectionsAPISolrJTest extends AbstractFullDistribZkTestBase {
assertEquals(0, (int) coresStatus.get(collectionName + "_shard1_0_replica1").get("status"));
assertEquals(0, (int) coresStatus.get(collectionName + "_shard1_1_replica1").get("status"));
- waitForRecoveriesToFinish(collectionName, false);
- waitForThingsToLevelOut(10);
+ waitForState("Expected all shards to be active and parent shard to be removed", collectionName, (n, c) -> {
+ if (c.getSlice("shard1").getState() == Slice.State.ACTIVE)
+ return false;
+ for (Replica r : c.getReplicas()) {
+ if (r.isActive(n) == false)
+ return false;
+ }
+ return true;
+ });
// Test splitting using split.key
- splitShardRequest = new CollectionAdminRequest.SplitShard()
- .setCollectionName(collectionName)
- .setSplitKey("b!");
- response = splitShardRequest.process(cloudClient);
+ response = CollectionAdminRequest.splitShard(collectionName)
+ .setSplitKey("b!")
+ .process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
- waitForRecoveriesToFinish(collectionName, false);
- waitForThingsToLevelOut(10);
-
- ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
- Collection<Slice> slices = clusterState.getActiveSlices(collectionName);
- assertEquals("ClusterState: "+ clusterState.getActiveSlices(collectionName), 5, slices.size());
+ waitForState("Expected 5 slices to be active", collectionName, (n, c) -> c.getActiveSlices().size() == 5);
}
- private void testCreateCollectionWithPropertyParam() throws Exception {
+ @Test
+ public void testCreateCollectionWithPropertyParam() throws Exception {
+
String collectionName = "solrj_test_core_props";
- File tmpDir = createTempDir("testPropertyParamsForCreate").toFile();
- File dataDir = new File(tmpDir, "dataDir-" + TestUtil.randomSimpleString(random(), 1, 5));
- File ulogDir = new File(tmpDir, "ulogDir-" + TestUtil.randomSimpleString(random(), 1, 5));
+ Path tmpDir = createTempDir("testPropertyParamsForCreate");
+ Path dataDir = tmpDir.resolve("dataDir-" + TestUtil.randomSimpleString(random(), 1, 5));
+ Path ulogDir = tmpDir.resolve("ulogDir-" + TestUtil.randomSimpleString(random(), 1, 5));
- Properties properties = new Properties();
- properties.put(CoreAdminParams.DATA_DIR, dataDir.getAbsolutePath());
- properties.put(CoreAdminParams.ULOG_DIR, ulogDir.getAbsolutePath());
+ CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1)
+ .withProperty(CoreAdminParams.DATA_DIR, dataDir.toString())
+ .withProperty(CoreAdminParams.ULOG_DIR, ulogDir.toString())
+ .process(cluster.getSolrClient());
- CollectionAdminRequest.Create createReq = new CollectionAdminRequest.Create()
- .setCollectionName(collectionName)
- .setNumShards(1)
- .setConfigName("conf1")
- .setProperties(properties);
-
- CollectionAdminResponse response = createReq.process(cloudClient);
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
Map<String, NamedList<Integer>> coresStatus = response.getCollectionCoresStatus();
assertEquals(1, coresStatus.size());
- DocCollection testCollection = cloudClient.getZkStateReader()
- .getClusterState().getCollection(collectionName);
+ DocCollection testCollection = getCollectionState(collectionName);
Replica replica1 = testCollection.getReplica("core_node1");
+ CoreStatus coreStatus = getCoreStatus(replica1);
- try (HttpSolrClient client = getHttpSolrClient(replica1.getStr("base_url"))) {
- CoreAdminResponse status = CoreAdminRequest.getStatus(replica1.getStr("core"), client);
- NamedList<Object> coreStatus = status.getCoreStatus(replica1.getStr("core"));
- String dataDirStr = (String) coreStatus.get("dataDir");
- assertEquals("Data dir does not match param given in property.dataDir syntax",
- new File(dataDirStr).getAbsolutePath(), dataDir.getAbsolutePath());
- }
+ assertEquals(Paths.get(coreStatus.getDataDirectory()).toString(), dataDir.toString());
- CollectionAdminRequest.Delete deleteCollectionRequest = new CollectionAdminRequest.Delete();
- deleteCollectionRequest.setCollectionName(collectionName);
- deleteCollectionRequest.process(cloudClient);
}
- private void testAddAndDeleteReplica() throws Exception {
- String collectionName = "solrj_replicatests";
- createCollection(collectionName, cloudClient, 1, 2);
+ @Test
+ public void testAddAndDeleteReplica() throws Exception {
- cloudClient.setDefaultCollection(collectionName);
+ final String collectionName = "solrj_replicatests";
+ CollectionAdminRequest.createCollection(collectionName, "conf", 1, 2)
+ .process(cluster.getSolrClient());
- String newReplicaName = Assign.assignNode(cloudClient.getZkStateReader().getClusterState().getCollection(collectionName));
- ArrayList<String> nodeList = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getLiveNodes());
+ String newReplicaName = Assign.assignNode(getCollectionState(collectionName));
+ ArrayList<String> nodeList
+ = new ArrayList<>(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
Collections.shuffle(nodeList, random());
- CollectionAdminRequest.AddReplica addReplica = new CollectionAdminRequest.AddReplica()
- .setCollectionName(collectionName)
- .setShardName("shard1")
- .setNode(nodeList.get(0));
- CollectionAdminResponse response = addReplica.process(cloudClient);
+ final String node = nodeList.get(0);
+
+ CollectionAdminResponse response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+ .setNode(node)
+ .process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
- TimeOut timeout = new TimeOut(3, TimeUnit.SECONDS);
- Replica newReplica = null;
-
- while (! timeout.hasTimedOut() && newReplica == null) {
- Slice slice = cloudClient.getZkStateReader().getClusterState().getSlice(collectionName, "shard1");
- newReplica = slice.getReplica(newReplicaName);
- }
-
- assertNotNull(newReplica);
-
- assertEquals("Replica should be created on the right node",
- cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
- newReplica.getStr(ZkStateReader.BASE_URL_PROP)
- );
+ waitForState("Expected to see replica " + newReplicaName + " on node " + node, collectionName, (n, c) -> {
+ Replica r = c.getSlice("shard1").getReplica(newReplicaName);
+ return r != null && r.getNodeName().equals(node);
+ });
// Test DELETEREPLICA
- CollectionAdminRequest.DeleteReplica deleteReplicaRequest = new CollectionAdminRequest.DeleteReplica()
- .setCollectionName(collectionName)
- .setShardName("shard1")
- .setReplica(newReplicaName);
- response = deleteReplicaRequest.process(cloudClient);
-
+ response = CollectionAdminRequest.deleteReplica(collectionName, "shard1", newReplicaName)
+ .process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
- timeout = new TimeOut(3, TimeUnit.SECONDS);
-
- while (! timeout.hasTimedOut() && newReplica != null) {
- Slice slice = cloudClient.getZkStateReader().getClusterState().getSlice(collectionName, "shard1");
- newReplica = slice.getReplica(newReplicaName);
- }
+ waitForState("Expected replica " + newReplicaName + " to vanish from cluster state", collectionName,
+ (n, c) -> c.getSlice("shard1").getReplica(newReplicaName) == null);
- assertNull(newReplica);
}
- private void testClusterProp() throws InterruptedException, IOException, SolrServerException {
- CollectionAdminRequest.ClusterProp clusterPropRequest = new CollectionAdminRequest.ClusterProp()
- .setPropertyName(ZkStateReader.LEGACY_CLOUD)
- .setPropertyValue("false");
- CollectionAdminResponse response = clusterPropRequest.process(cloudClient);
+ @Test
+ public void testClusterProp() throws InterruptedException, IOException, SolrServerException {
+
+ CollectionAdminResponse response = CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
+ .process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
- TimeOut timeout = new TimeOut(3, TimeUnit.SECONDS);
- boolean changed = false;
-
- while(! timeout.hasTimedOut()){
- Thread.sleep(10);
- changed = Objects.equals("false",
- cloudClient.getZkStateReader().getClusterProperty(ZkStateReader.LEGACY_CLOUD, "none"));
- if(changed) break;
- }
- assertTrue("The Cluster property wasn't set", changed);
+ ClusterProperties props = new ClusterProperties(zkClient());
+ assertEquals("Cluster property was not set", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "true"), "false");
// Unset ClusterProp that we set.
- clusterPropRequest = new CollectionAdminRequest.ClusterProp()
- .setPropertyName(ZkStateReader.LEGACY_CLOUD)
- .setPropertyValue(null);
- clusterPropRequest.process(cloudClient);
-
- timeout = new TimeOut(3, TimeUnit.SECONDS);
- changed = false;
- while(! timeout.hasTimedOut()) {
- Thread.sleep(10);
- changed = (cloudClient.getZkStateReader().getClusterProperty(ZkStateReader.LEGACY_CLOUD, (String) null) == null);
- if(changed)
- break;
- }
- assertTrue("The Cluster property wasn't unset", changed);
+ CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, null).process(cluster.getSolrClient());
+ assertEquals("Cluster property was not unset", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "true"), "true");
+
}
- private void testAddAndRemoveRole() throws InterruptedException, IOException, SolrServerException {
- cloudClient.setDefaultCollection(DEFAULT_COLLECTION);
- Replica replica = cloudClient.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
- CollectionAdminRequest.AddRole addRoleRequest = new CollectionAdminRequest.AddRole()
- .setNode(replica.getNodeName())
- .setRole("overseer");
- addRoleRequest.process(cloudClient);
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testAddAndRemoveRole() throws InterruptedException, IOException, SolrServerException {
+
+ String node = cluster.getRandomJetty(random()).getNodeName();
+
+ CollectionAdminRequest.addRole(node, "overseer").process(cluster.getSolrClient());
- CollectionAdminRequest.ClusterStatus clusterStatusRequest = new CollectionAdminRequest.ClusterStatus()
- .setCollectionName(DEFAULT_COLLECTION);
- CollectionAdminResponse response = clusterStatusRequest.process(cloudClient);
+ CollectionAdminResponse response = CollectionAdminRequest.getClusterStatus().process(cluster.getSolrClient());
NamedList<Object> rsp = response.getResponse();
- NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
- assertNotNull("Cluster state should not be null", cluster);
- Map<String, Object> roles = (Map<String, Object>) cluster.get("roles");
+ NamedList<Object> cs = (NamedList<Object>) rsp.get("cluster");
+ assertNotNull("Cluster state should not be null", cs);
+ Map<String, Object> roles = (Map<String, Object>) cs.get("roles");
assertNotNull("Role information should not be null", roles);
List<String> overseer = (List<String>) roles.get("overseer");
assertNotNull(overseer);
assertEquals(1, overseer.size());
- assertTrue(overseer.contains(replica.getNodeName()));
+ assertTrue(overseer.contains(node));
// Remove role
- new CollectionAdminRequest.RemoveRole()
- .setNode(replica.getNodeName())
- .setRole("overseer")
- .process(cloudClient);
-
- clusterStatusRequest = new CollectionAdminRequest.ClusterStatus();
- clusterStatusRequest.setCollectionName(DEFAULT_COLLECTION);
- response = clusterStatusRequest.process(cloudClient);
+ CollectionAdminRequest.removeRole(node, "overseer").process(cluster.getSolrClient());
+ response = CollectionAdminRequest.getClusterStatus().process(cluster.getSolrClient());
rsp = response.getResponse();
- cluster = (NamedList<Object>) rsp.get("cluster");
- assertNotNull("Cluster state should not be null", cluster);
- roles = (Map<String, Object>) cluster.get("roles");
+ cs = (NamedList<Object>) rsp.get("cluster");
+ assertNotNull("Cluster state should not be null", cs);
+ roles = (Map<String, Object>) cs.get("roles");
assertNotNull("Role information should not be null", roles);
overseer = (List<String>) roles.get("overseer");
- assertFalse(overseer.contains(replica.getNodeName()));
+ assertFalse(overseer.contains(node));
}
-
- private void testOverseerStatus() throws IOException, SolrServerException {
- CollectionAdminResponse response = new CollectionAdminRequest.OverseerStatus().process(cloudClient);
+
+ @Test
+ public void testOverseerStatus() throws IOException, SolrServerException {
+ CollectionAdminResponse response = new CollectionAdminRequest.OverseerStatus().process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertNotNull("overseer_operations shouldn't be null", response.getResponse().get("overseer_operations"));
}
-
- private void testList() throws IOException, SolrServerException {
- CollectionAdminResponse response = new CollectionAdminRequest.List().process(cloudClient);
+
+ @Test
+ public void testList() throws IOException, SolrServerException {
+ CollectionAdminResponse response = new CollectionAdminRequest.List().process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertNotNull("collection list should not be null", response.getResponse().get("collections"));
}
-
- private void testAddAndDeleteReplicaProp() throws InterruptedException, IOException, SolrServerException {
- Replica replica = cloudClient.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
- CollectionAdminResponse response = new CollectionAdminRequest.AddReplicaProp()
- .setCollectionName(DEFAULT_COLLECTION)
- .setShardName(SHARD1)
- .setReplica(replica.getName())
- .setPropertyName("preferredleader")
- .setPropertyValue("true").process(cloudClient);
- assertEquals(0, response.getStatus());
- TimeOut timeout = new TimeOut(20, TimeUnit.SECONDS);
- String propertyValue = null;
-
- String replicaName = replica.getName();
- while (! timeout.hasTimedOut()) {
- ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
- replica = clusterState.getReplica(DEFAULT_COLLECTION, replicaName);
- propertyValue = replica.getStr("property.preferredleader");
- if(StringUtils.equals("true", propertyValue))
- break;
- Thread.sleep(50);
- }
-
- assertEquals("Replica property was not updated, Latest value: " +
- cloudClient.getZkStateReader().getClusterState().getReplica(DEFAULT_COLLECTION, replicaName),
- "true",
- propertyValue);
-
- response = new CollectionAdminRequest.DeleteReplicaProp()
- .setCollectionName(DEFAULT_COLLECTION)
- .setShardName(SHARD1)
- .setReplica(replicaName)
- .setPropertyName("property.preferredleader").process(cloudClient);
+ @Test
+ public void testAddAndDeleteReplicaProp() throws InterruptedException, IOException, SolrServerException {
+
+ final String collection = "replicaProperties";
+ CollectionAdminRequest.createCollection(collection, "conf", 2, 2)
+ .process(cluster.getSolrClient());
+
+ final Replica replica = getCollectionState(collection).getLeader("shard1");
+ CollectionAdminResponse response
+ = CollectionAdminRequest.addReplicaProperty(collection, "shard1", replica.getName(), "preferredleader", "true")
+ .process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
- timeout = new TimeOut(20, TimeUnit.SECONDS);
- boolean updated = false;
+ waitForState("Expecting property 'preferredleader' to appear on replica " + replica.getName(), collection,
+ (n, c) -> "true".equals(c.getReplica(replica.getName()).getStr("property.preferredleader")));
- while (! timeout.hasTimedOut()) {
- ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
- replica = clusterState.getReplica(DEFAULT_COLLECTION, replicaName);
- updated = replica.getStr("property.preferredleader") == null;
- if(updated)
- break;
- Thread.sleep(50);
- }
+ response = CollectionAdminRequest.deleteReplicaProperty(collection, "shard1", replica.getName(), "property.preferredleader")
+ .process(cluster.getSolrClient());
+ assertEquals(0, response.getStatus());
- assertTrue("Replica property was not removed", updated);
+ waitForState("Expecting property 'preferredleader' to be removed from replica " + replica.getName(), collection,
+ (n, c) -> c.getReplica(replica.getName()).getStr("property.preferredleader") == null);
}
-
- private void testBalanceShardUnique() throws IOException,
+
+ @Test
+ public void testBalanceShardUnique() throws IOException,
SolrServerException, KeeperException, InterruptedException {
- CollectionAdminResponse response = new CollectionAdminRequest.BalanceShardUnique()
- .setCollection(DEFAULT_COLLECTION)
- .setPropertyName("preferredLeader").process(cloudClient);
+
+ final String collection = "balancedProperties";
+ CollectionAdminRequest.createCollection(collection, "conf", 2, 2)
+ .process(cluster.getSolrClient());
+
+ CollectionAdminResponse response = CollectionAdminRequest.balanceReplicaProperty(collection, "preferredLeader")
+ .process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
- verifyUniqueAcrossCollection(cloudClient, DEFAULT_COLLECTION, "property.preferredleader");
+ waitForState("Expecting 'preferredleader' property to be balanced across all shards", collection, (n, c) -> {
+ for (Slice slice : c) {
+ int count = 0;
+ for (Replica replica : slice) {
+ if ("true".equals(replica.getStr("property.preferredleader")))
+ count += 1;
+ }
+ if (count != 1)
+ return false;
+ }
+ return true;
+ });
+
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183f9980/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java
index dcc99a4..c46362e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteLastCustomShardedReplicaTest.java
@@ -16,106 +16,40 @@
*/
package org.apache.solr.cloud;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.TimeOut;
+import org.junit.BeforeClass;
import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
+public class DeleteLastCustomShardedReplicaTest extends SolrCloudTestCase {
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
-import static org.apache.solr.common.util.Utils.makeMap;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
-
-public class DeleteLastCustomShardedReplicaTest extends AbstractFullDistribZkTestBase {
-
- private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
- protected String getSolrXml() {
- return "solr.xml";
- }
-
- public DeleteLastCustomShardedReplicaTest() {
- sliceCount = 2;
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(2)
+ .addConfig("conf", configset("cloud-minimal"))
+ .configure();
}
@Test
- @ShardsFixed(num = 2)
public void test() throws Exception {
- try (CloudSolrClient client = createCloudClient(null)) {
- int replicationFactor = 1;
- int maxShardsPerNode = 5;
-
- Map<String, Object> props = Utils.makeMap(
- "router.name", ImplicitDocRouter.NAME,
- ZkStateReader.REPLICATION_FACTOR, replicationFactor,
- ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode,
- NUM_SLICES, 1,
- SHARDS_PROP, "a,b");
- Map<String,List<Integer>> collectionInfos = new HashMap<>();
+ final String collectionName = "customcollreplicadeletion";
- String collectionName = "customcollreplicadeletion";
+ CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "a,b", 1)
+ .setMaxShardsPerNode(5)
+ .process(cluster.getSolrClient());
- createCollection(collectionInfos, collectionName, props, client);
+ DocCollection collectionState = getCollectionState(collectionName);
+ Replica replica = getRandomReplica(collectionState.getSlice("a"));
- waitForRecoveriesToFinish(collectionName, false);
+ CollectionAdminRequest.deleteReplica(collectionName, "a", replica.getName())
+ .process(cluster.getSolrClient());
- DocCollection testcoll = getCommonCloudSolrClient().getZkStateReader()
- .getClusterState().getCollection(collectionName);
- Replica replica = testcoll.getSlice("a").getReplicas().iterator().next();
-
- removeAndWaitForReplicaGone(client, collectionName, replica, "a", replicationFactor-1);
- }
- }
+ waitForState("Expected shard 'a' to have no replicas", collectionName, (n, c) -> {
+ return c.getSlice("a") == null || c.getSlice("a").getReplicas().size() == 0;
+ });
- protected void removeAndWaitForReplicaGone(CloudSolrClient client, String COLL_NAME, Replica replica, String shard,
- final int expectedNumReplicasRemaining)
- throws SolrServerException, IOException, InterruptedException {
- Map m = makeMap("collection", COLL_NAME, "action", DELETEREPLICA.toLower(), "shard",
- shard, "replica", replica.getName());
- SolrParams params = new MapSolrParams(m);
- SolrRequest request = new QueryRequest(params);
- request.setPath("/admin/collections");
- client.request(request);
- TimeOut timeout = new TimeOut(3, TimeUnit.SECONDS);
- boolean success = false;
- DocCollection testcoll = null;
- while (! timeout.hasTimedOut()) {
- testcoll = getCommonCloudSolrClient().getZkStateReader()
- .getClusterState().getCollection(COLL_NAME);
- // As of SOLR-5209 the last replica deletion no longer leads to
- // the deletion of the slice.
- final Slice slice = testcoll.getSlice(shard);
- final int actualNumReplicasRemaining = (slice == null ? 0 : slice.getReplicas().size());
- success = (actualNumReplicasRemaining == expectedNumReplicasRemaining);
- if (success) {
- log.info("replica cleaned up {}/{} core {}",
- shard + "/" + replica.getName(), replica.getStr("core"));
- log.info("current state {}", testcoll);
- break;
- }
- Thread.sleep(100);
- }
- assertTrue("Replica not cleaned up", success);
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183f9980/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
index 9097363..ed3d03b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
@@ -20,202 +20,123 @@ import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
-import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.client.solrj.request.CoreStatus;
import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.Slice.State;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.Utils;
import org.apache.solr.util.FileUtils;
import org.apache.zookeeper.KeeperException;
+import org.junit.BeforeClass;
import org.junit.Test;
-public class DeleteShardTest extends AbstractFullDistribZkTestBase {
-
- public DeleteShardTest() {
- super();
- sliceCount = 2;
- }
+public class DeleteShardTest extends SolrCloudTestCase {
// TODO: Custom hash slice deletion test
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(2)
+ .addConfig("conf", configset("cloud-minimal"))
+ .configure();
+ }
+
@Test
- @ShardsFixed(num = 2)
public void test() throws Exception {
- ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
- Slice slice1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
- Slice slice2 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2);
+ final String collection = "deleteShard";
- assertNotNull("Shard1 not found", slice1);
- assertNotNull("Shard2 not found", slice2);
- assertSame("Shard1 is not active", Slice.State.ACTIVE, slice1.getState());
- assertSame("Shard2 is not active", Slice.State.ACTIVE, slice2.getState());
+ CollectionAdminRequest.createCollection(collection, "conf", 2, 1)
+ .process(cluster.getSolrClient());
- try {
- deleteShard(SHARD1);
- fail("Deleting an active shard should not have succeeded");
- } catch (HttpSolrClient.RemoteSolrException e) {
- // expected
- }
+ DocCollection state = getCollectionState(collection);
+ assertEquals(State.ACTIVE, state.getSlice("shard1").getState());
+ assertEquals(State.ACTIVE, state.getSlice("shard2").getState());
- setSliceState(SHARD1, Slice.State.INACTIVE);
+ // Can't delete an ACTIVE shard
+ expectThrows(Exception.class, () -> {
+ CollectionAdminRequest.deleteShard(collection, "shard1").process(cluster.getSolrClient());
+ });
- clusterState = cloudClient.getZkStateReader().getClusterState();
+ setSliceState(collection, "shard1", Slice.State.INACTIVE);
- slice1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
+ // Can delete an INACTIVE shard
+ CollectionAdminRequest.deleteShard(collection, "shard1").process(cluster.getSolrClient());
+ waitForState("Expected 'shard1' to be removed", collection, (n, c) -> {
+ return c.getSlice("shard1") == null;
+ });
- assertSame("Shard1 is not inactive yet.", Slice.State.INACTIVE, slice1.getState());
-
- deleteShard(SHARD1);
-
- confirmShardDeletion(SHARD1);
-
- setSliceState(SHARD2, Slice.State.CONSTRUCTION);
- deleteShard(SHARD2);
- confirmShardDeletion(SHARD2);
- }
+ // Can delete a shard under construction
+ setSliceState(collection, "shard2", Slice.State.CONSTRUCTION);
+ CollectionAdminRequest.deleteShard(collection, "shard2").process(cluster.getSolrClient());
+ waitForState("Expected 'shard2' to be removed", collection, (n, c) -> {
+ return c.getSlice("shard2") == null;
+ });
- protected void confirmShardDeletion(String shard) throws SolrServerException, KeeperException,
- InterruptedException {
- ZkStateReader zkStateReader = cloudClient.getZkStateReader();
- ClusterState clusterState = zkStateReader.getClusterState();
- int counter = 10;
- while (counter-- > 0) {
- clusterState = zkStateReader.getClusterState();
- if (clusterState.getSlice("collection1", shard) == null) {
- break;
- }
- Thread.sleep(1000);
- }
-
- assertNull("Cluster still contains shard1 even after waiting for it to be deleted.",
- clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1));
}
- protected void deleteShard(String shard) throws SolrServerException, IOException,
+ protected void setSliceState(String collection, String slice, State state) throws SolrServerException, IOException,
KeeperException, InterruptedException {
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set("action", CollectionParams.CollectionAction.DELETESHARD.toString());
- params.set("collection", AbstractFullDistribZkTestBase.DEFAULT_COLLECTION);
- params.set("shard", shard);
- SolrRequest request = new QueryRequest(params);
- request.setPath("/admin/collections");
-
- String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
- .getBaseURL();
- baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
-
- try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl)) {
- baseServer.setConnectionTimeout(15000);
- baseServer.setSoTimeout(60000);
- baseServer.request(request);
- }
- }
+ CloudSolrClient client = cluster.getSolrClient();
- protected void setSliceState(String slice, State state) throws SolrServerException, IOException,
- KeeperException, InterruptedException {
- DistributedQueue inQueue = Overseer.getStateUpdateQueue(cloudClient.getZkStateReader().getZkClient());
+ // TODO can this be encapsulated better somewhere?
+ DistributedQueue inQueue = Overseer.getStateUpdateQueue(client.getZkStateReader().getZkClient());
Map<String, Object> propMap = new HashMap<>();
propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
propMap.put(slice, state.toString());
- propMap.put(ZkStateReader.COLLECTION_PROP, "collection1");
+ propMap.put(ZkStateReader.COLLECTION_PROP, collection);
ZkNodeProps m = new ZkNodeProps(propMap);
- ZkStateReader zkStateReader = cloudClient.getZkStateReader();
inQueue.offer(Utils.toJSON(m));
- boolean transition = false;
-
- for (int counter = 10; counter > 0; counter--) {
- ClusterState clusterState = zkStateReader.getClusterState();
- State sliceState = clusterState.getSlice("collection1", slice).getState();
- if (sliceState == state) {
- transition = true;
- break;
- }
- Thread.sleep(1000);
- }
-
- if (!transition) {
- throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not set shard [" + slice + "] as " + state);
- }
+
+ waitForState("Expected shard " + slice + " to be in state " + state.toString(), collection, (n, c) -> {
+ return c.getSlice(slice).getState() == state;
+ });
+
}
@Test
public void testDirectoryCleanupAfterDeleteShard() throws InterruptedException, IOException, SolrServerException {
- CollectionAdminResponse rsp = new CollectionAdminRequest.Create()
- .setCollectionName("deleteshard_test")
- .setRouterName("implicit")
- .setShards("a,b,c")
- .setReplicationFactor(1)
- .setConfigName("conf1")
- .process(cloudClient);
+
+ final String collection = "deleteshard_test";
+ CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c", 1)
+ .setMaxShardsPerNode(2)
+ .process(cluster.getSolrClient());
// Get replica details
- Replica leader = cloudClient.getZkStateReader().getLeaderRetry("deleteshard_test", "a");
- String baseUrl = (String) leader.get("base_url");
- String core = (String) leader.get("core");
+ Replica leader = getCollectionState(collection).getLeader("a");
- String instanceDir;
- String dataDir;
+ CoreStatus coreStatus = getCoreStatus(leader);
+ assertTrue("Instance directory doesn't exist", FileUtils.fileExists(coreStatus.getInstanceDirectory()));
+ assertTrue("Data directory doesn't exist", FileUtils.fileExists(coreStatus.getDataDirectory()));
- try (HttpSolrClient client = getHttpSolrClient(baseUrl)) {
- CoreAdminResponse statusResp = CoreAdminRequest.getStatus(core, client);
- NamedList r = statusResp.getCoreStatus().get(core);
- instanceDir = (String) r.findRecursive("instanceDir");
- dataDir = (String) r.get("dataDir");
- }
+ assertEquals(3, getCollectionState(collection).getActiveSlices().size());
- assertTrue("Instance directory doesn't exist", FileUtils.fileExists(instanceDir));
- assertTrue("Data directory doesn't exist", FileUtils.fileExists(dataDir));
+ // Delete shard 'a'
+ CollectionAdminRequest.deleteShard(collection, "a").process(cluster.getSolrClient());
- assertEquals(3, cloudClient.getZkStateReader().getClusterState().getActiveSlices("deleteshard_test").size());
+ assertEquals(2, getCollectionState(collection).getActiveSlices().size());
+ assertFalse("Instance directory still exists", FileUtils.fileExists(coreStatus.getInstanceDirectory()));
+ assertFalse("Data directory still exists", FileUtils.fileExists(coreStatus.getDataDirectory()));
- // Delete shard 'a'
- new CollectionAdminRequest.DeleteShard()
- .setCollectionName("deleteshard_test")
- .setShardName("a")
- .process(cloudClient);
-
- assertEquals(2, cloudClient.getZkStateReader().getClusterState().getActiveSlices("deleteshard_test").size());
- assertFalse("Instance directory still exists", FileUtils.fileExists(instanceDir));
- assertFalse("Data directory still exists", FileUtils.fileExists(dataDir));
-
- leader = cloudClient.getZkStateReader().getLeaderRetry("deleteshard_test", "b");
- baseUrl = (String) leader.get("base_url");
- core = (String) leader.get("core");
-
- try (HttpSolrClient client = getHttpSolrClient(baseUrl)) {
- CoreAdminResponse statusResp = CoreAdminRequest.getStatus(core, client);
- NamedList r = statusResp.getCoreStatus().get(core);
- instanceDir = (String) r.findRecursive("instanceDir");
- dataDir = (String) r.get("dataDir");
- }
+ leader = getCollectionState(collection).getLeader("b");
+ coreStatus = getCoreStatus(leader);
// Delete shard 'b'
- new CollectionAdminRequest.DeleteShard()
- .setCollectionName("deleteshard_test")
- .setShardName("b")
+ CollectionAdminRequest.deleteShard(collection, "b")
.setDeleteDataDir(false)
.setDeleteInstanceDir(false)
- .process(cloudClient);
+ .process(cluster.getSolrClient());
- assertEquals(1, cloudClient.getZkStateReader().getClusterState().getActiveSlices("deleteshard_test").size());
- assertTrue("Instance directory still exists", FileUtils.fileExists(instanceDir));
- assertTrue("Data directory still exists", FileUtils.fileExists(dataDir));
+ assertEquals(1, getCollectionState(collection).getActiveSlices().size());
+ assertTrue("Instance directory still exists", FileUtils.fileExists(coreStatus.getInstanceDirectory()));
+ assertTrue("Data directory still exists", FileUtils.fileExists(coreStatus.getDataDirectory()));
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183f9980/solr/core/src/test/org/apache/solr/cloud/OverseerModifyCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerModifyCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerModifyCollectionTest.java
index e902ab4..c9a90a5 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerModifyCollectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerModifyCollectionTest.java
@@ -17,78 +17,62 @@
package org.apache.solr.cloud;
-import static org.apache.solr.client.solrj.SolrRequest.METHOD.POST;
-import static org.apache.solr.common.params.CommonParams.COLLECTIONS_HANDLER_PATH;
-
-import java.lang.invoke.MethodHandles;
import java.util.Map;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.ConfigSetAdminRequest;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.ConfigSetAdminResponse;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.Utils;
import org.apache.zookeeper.KeeperException;
+import org.junit.BeforeClass;
import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-public class OverseerModifyCollectionTest extends AbstractFullDistribZkTestBase {
- private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
+import static org.apache.solr.client.solrj.SolrRequest.METHOD.POST;
+import static org.apache.solr.common.params.CommonParams.COLLECTIONS_HANDLER_PATH;
+
+public class OverseerModifyCollectionTest extends SolrCloudTestCase {
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(2)
+ .addConfig("conf1", configset("cloud-minimal"))
+ .addConfig("conf2", configset("cloud-minimal"))
+ .configure();
+ }
+
@Test
public void testModifyColl() throws Exception {
- String collName = "modifyColl";
- String newConfName = "conf" + random().nextInt();
- String oldConfName = "conf1";
- try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
- CollectionAdminResponse rsp;
- CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collName, oldConfName, 1, 2);
- rsp = create.process(client);
- assertEquals(0, rsp.getStatus());
- assertTrue(rsp.isSuccess());
-
- ConfigSetAdminRequest.Create createConfig = new ConfigSetAdminRequest.Create()
- .setBaseConfigSetName(oldConfName)
- .setConfigSetName(newConfName);
-
- ConfigSetAdminResponse configRsp = createConfig.process(client);
-
- assertEquals(0, configRsp.getStatus());
-
- ModifiableSolrParams p = new ModifiableSolrParams();
- p.add("collection", collName);
- p.add("action", "MODIFYCOLLECTION");
- p.add("collection.configName", newConfName);
- client.request(new GenericSolrRequest(POST, COLLECTIONS_HANDLER_PATH, p));
- }
-
- assertEquals(newConfName, getConfigNameFromZk(collName));
+
+ final String collName = "modifyColl";
+
+ CollectionAdminRequest.createCollection(collName, "conf1", 1, 2)
+ .process(cluster.getSolrClient());
+
+ // TODO create a modifyCollection() method on CollectionAdminRequest
+ ModifiableSolrParams p1 = new ModifiableSolrParams();
+ p1.add("collection", collName);
+ p1.add("action", "MODIFYCOLLECTION");
+ p1.add("collection.configName", "conf2");
+ cluster.getSolrClient().request(new GenericSolrRequest(POST, COLLECTIONS_HANDLER_PATH, p1));
+
+ assertEquals("conf2", getConfigNameFromZk(collName));
//Try an invalid config name
- try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
- ModifiableSolrParams p = new ModifiableSolrParams();
- p.add("collection", collName);
- p.add("action", "MODIFYCOLLECTION");
- p.add("collection.configName", "notARealConfigName");
- try{
- client.request(new GenericSolrRequest(POST, COLLECTIONS_HANDLER_PATH, p));
- fail("Exception should be thrown");
- } catch(RemoteSolrException e) {
- assertTrue(e.getMessage(), e.getMessage().contains("Can not find the specified config set"));
- }
- }
+ ModifiableSolrParams p2 = new ModifiableSolrParams();
+ p2.add("collection", collName);
+ p2.add("action", "MODIFYCOLLECTION");
+ p2.add("collection.configName", "notARealConfigName");
+ Exception e = expectThrows(Exception.class, () -> {
+ cluster.getSolrClient().request(new GenericSolrRequest(POST, COLLECTIONS_HANDLER_PATH, p2));
+ });
+
+ assertTrue(e.getMessage(), e.getMessage().contains("Can not find the specified config set"));
}
private String getConfigNameFromZk(String collName) throws KeeperException, InterruptedException {
- byte[] b = cloudClient.getZkStateReader().getZkClient().getData(ZkStateReader.getCollectionPathRoot(collName), null, null, false);
+ byte[] b = zkClient().getData(ZkStateReader.getCollectionPathRoot(collName), null, null, false);
Map confData = (Map) Utils.fromJSON(b);
return (String) confData.get(ZkController.CONFIGNAME_PROP);
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183f9980/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
index dec54d9..762bbeb 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
@@ -16,118 +16,95 @@
*/
package org.apache.solr.cloud;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
-import org.apache.solr.client.solrj.SolrRequest;
+import java.lang.invoke.MethodHandles;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.Utils;
import org.apache.solr.util.TimeOut;
import org.apache.zookeeper.data.Stat;
+import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
import static org.apache.solr.cloud.OverseerCollectionConfigSetProcessor.getLeaderNode;
import static org.apache.solr.cloud.OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.common.util.Utils.makeMap;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.hamcrest.CoreMatchers.not;
-@LuceneTestCase.Slow
-@SuppressSSL(bugUrl = "SOLR-5776")
-public class OverseerRolesTest extends AbstractFullDistribZkTestBase{
+public class OverseerRolesTest extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- protected String getSolrXml() {
- return "solr.xml";
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(4)
+ .addConfig("conf", configset("cloud-minimal"))
+ .configure();
}
- public OverseerRolesTest() {
- sliceCount = 2;
- fixShardCount(TEST_NIGHTLY ? 6 : 2);
- }
-
- @Test
- public void test() throws Exception {
- try (CloudSolrClient client = createCloudClient(null)) {
- testQuitCommand(client);
- testOverseerRole(client);
+ @Before
+ public void clearAllOverseerRoles() throws Exception {
+ for (String node : OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zkClient())) {
+ CollectionAdminRequest.removeRole(node, "overseer").process(cluster.getSolrClient());
}
}
- private void testQuitCommand(CloudSolrClient client) throws Exception{
- String collectionName = "testOverseerQuit";
-
- createCollection(collectionName, client);
-
- waitForRecoveriesToFinish(collectionName, false);
+ @Test
+ public void testQuitCommand() throws Exception {
- SolrZkClient zk = client.getZkStateReader().getZkClient();
- byte[] data = new byte[0];
- data = zk.getData("/overseer_elect/leader", null, new Stat(), true);
+ SolrZkClient zk = zkClient();
+ byte[] data = zk.getData("/overseer_elect/leader", null, new Stat(), true);
Map m = (Map) Utils.fromJSON(data);
String s = (String) m.get("id");
String leader = LeaderElector.getNodeName(s);
- Overseer.getStateUpdateQueue(zk).offer(Utils.toJSON(new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.QUIT.toLower())));
+ log.info("Current overseer: {}", leader);
+ Overseer.getStateUpdateQueue(zk)
+ .offer(Utils.toJSON(new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.QUIT.toLower(),
+ "id", s)));
final TimeOut timeout = new TimeOut(10, TimeUnit.SECONDS);
- String newLeader=null;
+ String newLeader = null;
for(;! timeout.hasTimedOut();){
newLeader = OverseerCollectionConfigSetProcessor.getLeaderNode(zk);
- if(newLeader!=null && !newLeader.equals(leader)) break;
+ if (newLeader != null && !newLeader.equals(leader))
+ break;
Thread.sleep(100);
}
- assertNotSame( "Leader not changed yet",newLeader,leader);
-
+ assertThat("Leader not changed yet", newLeader, not(leader));
-
- assertTrue("The old leader should have rejoined election ", OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zk).contains(leader));
+ assertTrue("The old leader should have rejoined election",
+ OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zk).contains(leader));
}
+ @Test
+ public void testOverseerRole() throws Exception {
-
-
- private void testOverseerRole(CloudSolrClient client) throws Exception {
- String collectionName = "testOverseerCol";
-
- createCollection(collectionName, client);
-
- waitForRecoveriesToFinish(collectionName, false);
- List<String> l = OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(client.getZkStateReader().getZkClient()) ;
+ List<String> l = OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zkClient()) ;
log.info("All nodes {}", l);
- String currentLeader = OverseerCollectionConfigSetProcessor.getLeaderNode(client.getZkStateReader().getZkClient());
+ String currentLeader = OverseerCollectionConfigSetProcessor.getLeaderNode(zkClient());
log.info("Current leader {} ", currentLeader);
l.remove(currentLeader);
Collections.shuffle(l, random());
String overseerDesignate = l.get(0);
- log.info("overseerDesignate {}",overseerDesignate);
- setOverseerRole(client, CollectionAction.ADDROLE,overseerDesignate);
+ log.info("overseerDesignate {}", overseerDesignate);
+
+ CollectionAdminRequest.addRole(overseerDesignate, "overseer").process(cluster.getSolrClient());
TimeOut timeout = new TimeOut(15, TimeUnit.SECONDS);
boolean leaderchanged = false;
- for(;!timeout.hasTimedOut();){
- if(overseerDesignate.equals(OverseerCollectionConfigSetProcessor.getLeaderNode(client.getZkStateReader().getZkClient()))){
+ for (;!timeout.hasTimedOut();) {
+ if (overseerDesignate.equals(OverseerCollectionConfigSetProcessor.getLeaderNode(zkClient()))) {
log.info("overseer designate is the new overseer");
leaderchanged =true;
break;
@@ -136,36 +113,29 @@ public class OverseerRolesTest extends AbstractFullDistribZkTestBase{
}
assertTrue("could not set the new overseer . expected "+
overseerDesignate + " current order : " +
- getSortedOverseerNodeNames(client.getZkStateReader().getZkClient()) +
- " ldr :"+ OverseerCollectionConfigSetProcessor.getLeaderNode(client.getZkStateReader().getZkClient()) ,leaderchanged);
-
-
+ getSortedOverseerNodeNames(zkClient()) +
+ " ldr :"+ OverseerCollectionConfigSetProcessor.getLeaderNode(zkClient()) ,leaderchanged);
//add another node as overseer
-
-
l.remove(overseerDesignate);
-
Collections.shuffle(l, random());
String anotherOverseer = l.get(0);
log.info("Adding another overseer designate {}", anotherOverseer);
- setOverseerRole(client, CollectionAction.ADDROLE, anotherOverseer);
+ CollectionAdminRequest.addRole(anotherOverseer, "overseer").process(cluster.getSolrClient());
- String currentOverseer = getLeaderNode(client.getZkStateReader().getZkClient());
+ String currentOverseer = getLeaderNode(zkClient());
log.info("Current Overseer {}", currentOverseer);
- String hostPort = currentOverseer.substring(0,currentOverseer.indexOf('_'));
+ String hostPort = currentOverseer.substring(0, currentOverseer.indexOf('_'));
StringBuilder sb = new StringBuilder();
-//
-//
log.info("hostPort : {}", hostPort);
JettySolrRunner leaderJetty = null;
- for (JettySolrRunner jetty : jettys) {
+ for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
String s = jetty.getBaseUrl().toString();
log.info("jetTy {}",s);
sb.append(s).append(" , ");
@@ -178,49 +148,20 @@ public class OverseerRolesTest extends AbstractFullDistribZkTestBase{
assertNotNull("Could not find a jetty2 kill", leaderJetty);
log.info("leader node {}", leaderJetty.getBaseUrl());
- log.info ("current election Queue",
- OverseerCollectionConfigSetProcessor.getSortedElectionNodes(client.getZkStateReader().getZkClient(),
- "/overseer_elect/election"));
+ log.info("current election Queue",
+ OverseerCollectionConfigSetProcessor.getSortedElectionNodes(zkClient(), "/overseer_elect/election"));
ChaosMonkey.stop(leaderJetty);
timeout = new TimeOut(10, TimeUnit.SECONDS);
leaderchanged = false;
for (; !timeout.hasTimedOut(); ) {
- currentOverseer = getLeaderNode(client.getZkStateReader().getZkClient());
+ currentOverseer = getLeaderNode(zkClient());
if (anotherOverseer.equals(currentOverseer)) {
leaderchanged = true;
break;
}
Thread.sleep(100);
}
- assertTrue("New overseer designate has not become the overseer, expected : " + anotherOverseer + "actual : " + getLeaderNode(client.getZkStateReader().getZkClient()), leaderchanged);
+ assertTrue("New overseer designate has not become the overseer, expected : " + anotherOverseer + "actual : " + getLeaderNode(zkClient()), leaderchanged);
}
- private void setOverseerRole(CloudSolrClient client, CollectionAction action, String overseerDesignate) throws Exception, IOException {
- log.info("Adding overseer designate {} ", overseerDesignate);
- Map m = makeMap(
- "action", action.toString().toLowerCase(Locale.ROOT),
- "role", "overseer",
- "node", overseerDesignate);
- SolrParams params = new MapSolrParams(m);
- SolrRequest request = new QueryRequest(params);
- request.setPath("/admin/collections");
- client.request(request);
- }
-
-
- protected void createCollection(String COLL_NAME, CloudSolrClient client) throws Exception {
- int replicationFactor = 2;
- int numShards = 4;
- int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
- .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
- Map<String, Object> props = makeMap(
- REPLICATION_FACTOR, replicationFactor,
- MAX_SHARDS_PER_NODE, maxShardsPerNode,
- NUM_SLICES, numShards);
- Map<String,List<Integer>> collectionInfos = new HashMap<>();
- createCollection(collectionInfos, COLL_NAME, props, client);
- }
-
-
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183f9980/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java
index b1899da..80fd38e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerStatusTest.java
@@ -17,74 +17,56 @@
package org.apache.solr.cloud;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
+import org.junit.BeforeClass;
import org.junit.Test;
-public class OverseerStatusTest extends BasicDistributedZkTest {
+public class OverseerStatusTest extends SolrCloudTestCase {
- public OverseerStatusTest() {
- schemaString = "schema15.xml"; // we need a string id
- sliceCount = 1;
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(2)
+ .addConfig("conf", configset("cloud-minimal"))
+ .configure();
}
@Test
- @ShardsFixed(num = 1)
public void test() throws Exception {
- waitForThingsToLevelOut(15);
-
// find existing command counts because collection may be created by base test class too
int numCollectionCreates = 0, numOverseerCreates = 0;
- NamedList<Object> resp = new CollectionAdminRequest.OverseerStatus().process(cloudClient).getResponse();
- if (resp != null) {
- NamedList<Object> collection_operations = (NamedList<Object>) resp.get("collection_operations");
- if (collection_operations != null) {
- SimpleOrderedMap<Object> createcollection = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.CREATE.toLower());
- if (createcollection != null && createcollection.get("requests") != null) {
- numCollectionCreates = (Integer) createcollection.get("requests");
- }
- NamedList<Object> overseer_operations = (NamedList<Object>) resp.get("overseer_operations");
- if (overseer_operations != null) {
- createcollection = (SimpleOrderedMap<Object>) overseer_operations.get(CollectionParams.CollectionAction.CREATE.toLower());
- if (createcollection != null && createcollection.get("requests") != null) {
- numOverseerCreates = (Integer) createcollection.get("requests");
- }
- }
- }
- }
String collectionName = "overseer_status_test";
- CollectionAdminResponse response = createCollection(collectionName, 1, 1, 1);
- resp = new CollectionAdminRequest.OverseerStatus().process(cloudClient).getResponse();
+ CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1).process(cluster.getSolrClient());
+
+ NamedList<Object> resp = new CollectionAdminRequest.OverseerStatus().process(cluster.getSolrClient()).getResponse();
NamedList<Object> collection_operations = (NamedList<Object>) resp.get("collection_operations");
NamedList<Object> overseer_operations = (NamedList<Object>) resp.get("overseer_operations");
- SimpleOrderedMap<Object> createcollection = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.CREATE.toLower());
+ SimpleOrderedMap<Object> createcollection
+ = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.CREATE.toLower());
assertEquals("No stats for create in OverseerCollectionProcessor", numCollectionCreates + 1, createcollection.get("requests"));
createcollection = (SimpleOrderedMap<Object>) overseer_operations.get(CollectionParams.CollectionAction.CREATE.toLower());
assertEquals("No stats for create in Overseer", numOverseerCreates + 1, createcollection.get("requests"));
// Reload the collection
- new CollectionAdminRequest.Reload().setCollectionName(collectionName).process(cloudClient);
-
+ CollectionAdminRequest.reloadCollection(collectionName).process(cluster.getSolrClient());
- resp = new CollectionAdminRequest.OverseerStatus().process(cloudClient).getResponse();
+ resp = new CollectionAdminRequest.OverseerStatus().process(cluster.getSolrClient()).getResponse();
collection_operations = (NamedList<Object>) resp.get("collection_operations");
SimpleOrderedMap<Object> reload = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.RELOAD.toLower());
assertEquals("No stats for reload in OverseerCollectionProcessor", 1, reload.get("requests"));
try {
- new CollectionAdminRequest.SplitShard()
- .setCollectionName("non_existent_collection")
- .setShardName("non_existent_shard")
- .process(cloudClient);
+ CollectionAdminRequest.splitShard("non_existent_collection")
+ .setShardName("non_existent_shard")
+ .process(cluster.getSolrClient());
fail("Split shard for non existent collection should have failed");
} catch (Exception e) {
// expected because we did not correctly specify required params for split
}
- resp = new CollectionAdminRequest.OverseerStatus().process(cloudClient).getResponse();
+ resp = new CollectionAdminRequest.OverseerStatus().process(cluster.getSolrClient()).getResponse();
collection_operations = (NamedList<Object>) resp.get("collection_operations");
SimpleOrderedMap<Object> split = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.SPLITSHARD.toLower());
assertEquals("No stats for split in OverseerCollectionProcessor", 1, split.get("errors"));
@@ -111,6 +93,5 @@ public class OverseerStatusTest extends BasicDistributedZkTest {
assertNotNull(updateState.get("errors"));
assertNotNull(updateState.get("avgTimePerRequest"));
- waitForThingsToLevelOut(15);
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183f9980/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java b/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java
index 24f9696..54503bf 100644
--- a/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java
@@ -16,58 +16,43 @@
*/
package org.apache.solr.cloud;
-import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
+import org.junit.BeforeClass;
import org.junit.Test;
-import java.util.ArrayList;
-import java.util.List;
-
import static org.junit.internal.matchers.StringContains.containsString;
/**
* Verify that remote (proxied) queries return proper error messages
*/
-@Slow
-public class RemoteQueryErrorTest extends AbstractFullDistribZkTestBase {
+public class RemoteQueryErrorTest extends SolrCloudTestCase {
- public RemoteQueryErrorTest() {
- super();
- sliceCount = 1;
- fixShardCount(random().nextBoolean() ? 3 : 4);
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(3)
+ .addConfig("conf", configset("cloud-minimal"))
+ .configure();
}
+ // TODO add test for CloudSolrClient as well
+
@Test
public void test() throws Exception {
- handle.clear();
- handle.put("timestamp", SKIPVAL);
-
- waitForThingsToLevelOut(15);
- del("*:*");
-
- createCollection("collection2", 2, 1, 10);
-
- List<Integer> numShardsNumReplicaList = new ArrayList<>(2);
- numShardsNumReplicaList.add(2);
- numShardsNumReplicaList.add(1);
- checkForCollection("collection2", numShardsNumReplicaList, null);
- waitForRecoveriesToFinish("collection2", true);
+ CollectionAdminRequest.createCollection("collection", "conf", 2, 1).process(cluster.getSolrClient());
- for (SolrClient solrClient : clients) {
- try {
- SolrInputDocument emptyDoc = new SolrInputDocument();
- solrClient.add(emptyDoc);
- fail("Expected unique key exception");
- } catch (SolrException ex) {
- assertThat(ex.getMessage(), containsString("Document is missing mandatory uniqueKey field: id"));
- } catch(Exception ex) {
- fail("Expected a SolrException to occur, instead received: " + ex.getClass());
- } finally {
- solrClient.close();
+ for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
+ try (SolrClient client = jetty.newClient()) {
+ SolrException e = expectThrows(SolrException.class, () -> {
+ client.add("collection", new SolrInputDocument());
+ });
+ assertThat(e.getMessage(), containsString("Document is missing mandatory uniqueKey field: id"));
}
}
+
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183f9980/solr/core/src/test/org/apache/solr/cloud/TestDownShardTolerantSearch.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestDownShardTolerantSearch.java b/solr/core/src/test/org/apache/solr/cloud/TestDownShardTolerantSearch.java
index 01c4440..415d4e4 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestDownShardTolerantSearch.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestDownShardTolerantSearch.java
@@ -16,13 +16,15 @@
*/
package org.apache.solr.cloud;
-import java.io.IOException;
import java.lang.invoke.MethodHandles;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.ShardParams;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -34,41 +36,47 @@ import static org.hamcrest.CoreMatchers.is;
* and also asserts that a meaningful exception is thrown when shards.tolerant=false
* See SOLR-7566
*/
-public class TestDownShardTolerantSearch extends AbstractFullDistribZkTestBase {
+public class TestDownShardTolerantSearch extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- public TestDownShardTolerantSearch() {
- sliceCount = 2;
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(2)
+ .addConfig("conf", configset("cloud-minimal"))
+ .configure();
}
@Test
- @ShardsFixed(num = 2)
public void searchingShouldFailWithoutTolerantSearchSetToTrue() throws Exception {
- waitForRecoveriesToFinish(true);
- indexAbunchOfDocs();
- commit();
- QueryResponse response = cloudClient.query(new SolrQuery("*:*").setRows(1));
+ CollectionAdminRequest.createCollection("tolerant", "conf", 2, 1)
+ .process(cluster.getSolrClient());
+
+ UpdateRequest update = new UpdateRequest();
+ for (int i = 0; i < 100; i++) {
+ update.add("id", Integer.toString(i));
+ }
+ update.commit(cluster.getSolrClient(), "tolerant");
+
+ QueryResponse response = cluster.getSolrClient().query("tolerant", new SolrQuery("*:*").setRows(1));
assertThat(response.getStatus(), is(0));
- assertThat(response.getResults().getNumFound(), is(66L));
+ assertThat(response.getResults().getNumFound(), is(100L));
- ChaosMonkey.kill(shardToJetty.get(SHARD1).get(0));
+ cluster.stopJettySolrRunner(0);
- response = cloudClient.query(new SolrQuery("*:*").setRows(1).setParam(ShardParams.SHARDS_TOLERANT, true));
+ response = cluster.getSolrClient().query("tolerant", new SolrQuery("*:*").setRows(1).setParam(ShardParams.SHARDS_TOLERANT, true));
assertThat(response.getStatus(), is(0));
assertTrue(response.getResults().getNumFound() > 0);
try {
- cloudClient.query(new SolrQuery("*:*").setRows(1).setParam(ShardParams.SHARDS_TOLERANT, false));
+ cluster.getSolrClient().query("tolerant", new SolrQuery("*:*").setRows(1).setParam(ShardParams.SHARDS_TOLERANT, false));
fail("Request should have failed because we killed shard1 jetty");
} catch (SolrServerException e) {
log.info("error from server", e);
assertNotNull(e.getCause());
assertTrue("Error message from server should have the name of the down shard",
- e.getCause().getMessage().contains(SHARD1));
- } catch (IOException e) {
- e.printStackTrace();
+ e.getCause().getMessage().contains("shard"));
}
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183f9980/solr/core/src/test/org/apache/solr/cloud/TestExclusionRuleCollectionAccess.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestExclusionRuleCollectionAccess.java b/solr/core/src/test/org/apache/solr/cloud/TestExclusionRuleCollectionAccess.java
index 9ef2dcd..5bf77c1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestExclusionRuleCollectionAccess.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestExclusionRuleCollectionAccess.java
@@ -16,34 +16,32 @@
*/
package org.apache.solr.cloud;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.junit.BeforeClass;
import org.junit.Test;
-@LuceneTestCase.Slow
-public class TestExclusionRuleCollectionAccess extends AbstractFullDistribZkTestBase {
+public class TestExclusionRuleCollectionAccess extends SolrCloudTestCase {
- public TestExclusionRuleCollectionAccess() {
- schemaString = "schema15.xml"; // we need a string id
- sliceCount = 1;
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(1)
+ .addConfig("conf", configset("cloud-minimal"))
+ .configure();
}
@Test
public void doTest() throws Exception {
- CollectionAdminRequest.Create req = new CollectionAdminRequest.Create();
- req.setCollectionName("css33");
- req.setNumShards(1);
- req.process(cloudClient);
-
- waitForRecoveriesToFinish("css33", false);
-
- try (SolrClient c = createCloudClient("css33")) {
- c.add(getDoc("id", "1"));
- c.commit();
-
- assertEquals("Should have returned 1 result", 1, c.query(params("q", "*:*", "collection", "css33")).getResults().getNumFound());
- }
+
+ CollectionAdminRequest.createCollection("css33", "conf", 1, 1).process(cluster.getSolrClient());
+
+ new UpdateRequest()
+ .add("id", "1")
+ .commit(cluster.getSolrClient(), "css33");
+
+ assertEquals("Should have returned 1 result", 1,
+ cluster.getSolrClient().query("css33", params("q", "*:*", "collection", "css33")).getResults().getNumFound());
+
}
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183f9980/solr/core/src/test/org/apache/solr/security/PKIAuthenticationIntegrationTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/security/PKIAuthenticationIntegrationTest.java b/solr/core/src/test/org/apache/solr/security/PKIAuthenticationIntegrationTest.java
index 30fe933..bc4f4e5 100644
--- a/solr/core/src/test/org/apache/solr/security/PKIAuthenticationIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/security/PKIAuthenticationIntegrationTest.java
@@ -18,19 +18,21 @@ package org.apache.solr.security;
import javax.servlet.ServletRequest;
import javax.servlet.http.HttpServletRequest;
-
import java.lang.invoke.MethodHandles;
import java.security.Principal;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
-import org.apache.solr.SolrTestCaseJ4;
+import org.apache.http.client.HttpClient;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
+import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.Utils;
+import org.junit.After;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -39,27 +41,32 @@ import static java.util.Collections.singletonMap;
import static org.apache.solr.common.util.Utils.makeMap;
import static org.apache.solr.security.TestAuthorizationFramework.verifySecurityStatus;
-@SolrTestCaseJ4.SuppressSSL
-public class PKIAuthenticationIntegrationTest extends AbstractFullDistribZkTestBase {
+public class PKIAuthenticationIntegrationTest extends SolrCloudTestCase {
+
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- static final int TIMEOUT = 10000;
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(2)
+ .addConfig("conf", configset("cloud-minimal"))
+ .configure();
+ }
@Test
public void testPkiAuth() throws Exception {
- waitForThingsToLevelOut(10);
+ CollectionAdminRequest.createCollection("collection", "conf", 2, 1).process(cluster.getSolrClient());
+
+ // TODO make a SolrJ helper class for this
byte[] bytes = Utils.toJSON(makeMap("authorization", singletonMap("class", MockAuthorizationPlugin.class.getName()),
"authentication", singletonMap("class", MockAuthenticationPlugin.class.getName())));
+ zkClient().setData(ZkStateReader.SOLR_SECURITY_CONF_PATH, bytes, true);
- try (ZkStateReader zkStateReader = new ZkStateReader(zkServer.getZkAddress(),
- TIMEOUT, TIMEOUT)) {
- zkStateReader.getZkClient().setData(ZkStateReader.SOLR_SECURITY_CONF_PATH, bytes, true);
- }
- for (JettySolrRunner jetty : jettys) {
+ HttpClient httpClient = cluster.getSolrClient().getHttpClient();
+ for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
String baseUrl = jetty.getBaseUrl().toString();
- verifySecurityStatus(cloudClient.getLbClient().getHttpClient(), baseUrl + "/admin/authorization", "authorization/class", MockAuthorizationPlugin.class.getName(), 20);
- verifySecurityStatus(cloudClient.getLbClient().getHttpClient(), baseUrl + "/admin/authentication", "authentication.enabled", "true", 20);
+ verifySecurityStatus(httpClient, baseUrl + "/admin/authorization", "authorization/class", MockAuthorizationPlugin.class.getName(), 20);
+ verifySecurityStatus(httpClient, baseUrl + "/admin/authentication", "authentication.enabled", "true", 20);
}
log.info("Starting test");
ModifiableSolrParams params = new ModifiableSolrParams();
@@ -95,13 +102,12 @@ public class PKIAuthenticationIntegrationTest extends AbstractFullDistribZkTestB
}
};
QueryRequest query = new QueryRequest(params);
- query.process(cloudClient);
+ query.process(cluster.getSolrClient(), "collection");
assertTrue("all nodes must get the user solr , no:of nodes got solr : " + count.get(),count.get() > 2);
}
- @Override
+ @After
public void distribTearDown() throws Exception {
- super.distribTearDown();
MockAuthenticationPlugin.predicate = null;
MockAuthorizationPlugin.predicate = null;
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/183f9980/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
index 0beaa55..94750c0a 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
@@ -280,6 +280,8 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
public CollectionAdminRoleRequest(CollectionAction action, String node, String role) {
super(action);
+ this.node = node;
+ this.role = role;
}
@Override