Posted to commits@lucene.apache.org by tf...@apache.org on 2017/05/12 23:39:22 UTC
[55/58] [abbrv] lucene-solr:jira/solr-10233: Rename replica types
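
This patch (branch jira/solr-10233) renames the SolrCloud replica types and the related test hooks. As a reader's summary drawn from the diffs below (not an exhaustive list), the constants of org.apache.solr.common.cloud.Replica.Type map as:

    // old constant              new constant
    Replica.Type.REALTIME   ->   Replica.Type.NRT
    Replica.Type.APPEND     ->   Replica.Type.TLOG
    Replica.Type.PASSIVE    ->   Replica.Type.PULL

Test hooks follow the same scheme, e.g. useAppendReplicas() becomes useTlogReplicas() and getPassiveReplicaCount() becomes getPullReplicaCount(), and test classes are renamed accordingly (for example ChaosMonkeySafeLeaderWithPassiveReplicasTest becomes ChaosMonkeySafeLeaderWithPullReplicasTest).
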
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderWithPassiveReplicasTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderWithPassiveReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderWithPassiveReplicasTest.java
deleted file mode 100644
index 8639fba..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderWithPassiveReplicasTest.java
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.SolrTestCaseJ4.SuppressObjectReleaseTracker;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.util.TestInjection;
-import org.apache.solr.util.TimeOut;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Slow
-@SuppressObjectReleaseTracker(bugUrl="Testing purposes")
-public class ChaosMonkeySafeLeaderWithPassiveReplicasTest extends AbstractFullDistribZkTestBase {
- private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
- private static final Integer RUN_LENGTH = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.runlength", "-1"));
-
- private final boolean useAppendReplicas = random().nextBoolean();
-
- private final int numPassiveReplicas;
- private final int numRealtimeOrAppendReplicas;
-
- @Override
- protected int getPassiveReplicaCount() {
- return numPassiveReplicas;
- }
-
- @Override
- protected boolean useAppendReplicas() {
- return useAppendReplicas;
- }
-
- @BeforeClass
- public static void beforeSuperClass() {
- schemaString = "schema15.xml"; // we need a string id
- System.setProperty("solr.autoCommit.maxTime", "15000");
- TestInjection.waitForReplicasInSync = null;
- setErrorHook();
- }
-
- @AfterClass
- public static void afterSuperClass() {
- System.clearProperty("solr.autoCommit.maxTime");
- clearErrorHook();
- TestInjection.reset();
- }
-
- protected static final String[] fieldNames = new String[]{"f_i", "f_f", "f_d", "f_l", "f_dt"};
- protected static final RandVal[] randVals = new RandVal[]{rint, rfloat, rdouble, rlong, rdate};
-
- public String[] getFieldNames() {
- return fieldNames;
- }
-
- public RandVal[] getRandValues() {
- return randVals;
- }
-
- @Override
- public void distribSetUp() throws Exception {
- useFactory("solr.StandardDirectoryFactory");
- super.distribSetUp();
- }
-
- public ChaosMonkeySafeLeaderWithPassiveReplicasTest() {
- super();
- numPassiveReplicas = random().nextInt(TEST_NIGHTLY ? 3 : 2) + 1;;
- numRealtimeOrAppendReplicas = random().nextInt(TEST_NIGHTLY ? 3 : 2) + 1;;
- sliceCount = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.slicecount", "-1"));
- if (sliceCount == -1) {
- sliceCount = random().nextInt(TEST_NIGHTLY ? 3 : 2) + 1;
- }
-
- int numNodes = sliceCount * (numRealtimeOrAppendReplicas + numPassiveReplicas);
- fixShardCount(numNodes);
- log.info("Starting ChaosMonkey test with {} shards and {} nodes", sliceCount, numNodes);
- }
-
- @Test
- public void test() throws Exception {
- DocCollection docCollection = cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
- assertEquals(this.sliceCount, docCollection.getSlices().size());
- Slice s = docCollection.getSlice("shard1");
- assertNotNull(s);
- assertEquals("Unexpected number of replicas. Collection: " + docCollection, numRealtimeOrAppendReplicas + numPassiveReplicas, s.getReplicas().size());
- assertEquals("Unexpected number of passive replicas. Collection: " + docCollection, numPassiveReplicas, s.getReplicas(EnumSet.of(Replica.Type.PASSIVE)).size());
- assertEquals(useAppendReplicas()?0:numRealtimeOrAppendReplicas, s.getReplicas(EnumSet.of(Replica.Type.REALTIME)).size());
- assertEquals(useAppendReplicas()?numRealtimeOrAppendReplicas:0, s.getReplicas(EnumSet.of(Replica.Type.APPEND)).size());
- handle.clear();
- handle.put("timestamp", SKIPVAL);
-
- // randomly turn on 1 seconds 'soft' commit
- randomlyEnableAutoSoftCommit();
-
- tryDelete();
-
- List<StoppableThread> threads = new ArrayList<>();
- int threadCount = 2;
- int batchSize = 1;
- if (random().nextBoolean()) {
- batchSize = random().nextInt(98) + 2;
- }
-
- boolean pauseBetweenUpdates = TEST_NIGHTLY ? random().nextBoolean() : true;
- int maxUpdates = -1;
- if (!pauseBetweenUpdates) {
- maxUpdates = 1000 + random().nextInt(1000);
- } else {
- maxUpdates = 15000;
- }
-
- for (int i = 0; i < threadCount; i++) {
- StoppableIndexingThread indexThread = new StoppableIndexingThread(controlClient, cloudClient, Integer.toString(i), true, maxUpdates, batchSize, pauseBetweenUpdates); // random().nextInt(999) + 1
- threads.add(indexThread);
- indexThread.start();
- }
-
- StoppableCommitThread commitThread = new StoppableCommitThread(cloudClient, 1000, false);
- threads.add(commitThread);
- commitThread.start();
-
- chaosMonkey.startTheMonkey(false, 500);
- try {
- long runLength;
- if (RUN_LENGTH != -1) {
- runLength = RUN_LENGTH;
- } else {
- int[] runTimes;
- if (TEST_NIGHTLY) {
- runTimes = new int[] {5000, 6000, 10000, 15000, 25000, 30000,
- 30000, 45000, 90000, 120000};
- } else {
- runTimes = new int[] {5000, 7000, 15000};
- }
- runLength = runTimes[random().nextInt(runTimes.length - 1)];
- }
-
- ChaosMonkey.wait(runLength, DEFAULT_COLLECTION, cloudClient.getZkStateReader());
- } finally {
- chaosMonkey.stopTheMonkey();
- }
-
- for (StoppableThread thread : threads) {
- thread.safeStop();
- }
-
- // wait for stop...
- for (StoppableThread thread : threads) {
- thread.join();
- }
-
- for (StoppableThread thread : threads) {
- if (thread instanceof StoppableIndexingThread) {
- assertEquals(0, ((StoppableIndexingThread)thread).getFailCount());
- }
- }
-
- // try and wait for any replications and what not to finish...
-
- Thread.sleep(2000);
-
- waitForThingsToLevelOut(180000);
-
- // even if things were leveled out, a jetty may have just been stopped or something
- // we wait again and wait to level out again to make sure the system is not still in flux
-
- Thread.sleep(3000);
-
- waitForThingsToLevelOut(180000);
-
- log.info("control docs:" + controlClient.query(new SolrQuery("*:*")).getResults().getNumFound() + "\n\n");
-
- waitForReplicationFromReplicas(DEFAULT_COLLECTION, cloudClient.getZkStateReader(), new TimeOut(30, TimeUnit.SECONDS));
-
- checkShardConsistency(batchSize == 1, true);
-
- // try and make a collection to make sure the overseer has survived the expiration and session loss
-
- // sometimes we restart zookeeper as well
- if (random().nextBoolean()) {
- zkServer.shutdown();
- zkServer = new ZkTestServer(zkServer.getZkDir(), zkServer.getPort());
- zkServer.run();
- }
-
- try (CloudSolrClient client = createCloudClient("collection1")) {
- createCollection(null, "testcollection", 1, 1, 100, client, null, "conf1");
-
- }
- List<Integer> numShardsNumReplicas = new ArrayList<>(2);
- numShardsNumReplicas.add(1);
- numShardsNumReplicas.add(1 + getPassiveReplicaCount());
- checkForCollection("testcollection",numShardsNumReplicas, null);
- }
-
- private void tryDelete() throws Exception {
- long start = System.nanoTime();
- long timeout = start + TimeUnit.NANOSECONDS.convert(10, TimeUnit.SECONDS);
- while (System.nanoTime() < timeout) {
- try {
- del("*:*");
- break;
- } catch (SolrServerException e) {
- // cluster may not be up yet
- e.printStackTrace();
- }
- Thread.sleep(100);
- }
- }
-
- // skip the randoms - they can deadlock...
- @Override
- protected void indexr(Object... fields) throws Exception {
- SolrInputDocument doc = new SolrInputDocument();
- addFields(doc, fields);
- addFields(doc, "rnd_b", true);
- indexDoc(doc);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderWithPullReplicasTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderWithPullReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderWithPullReplicasTest.java
new file mode 100644
index 0000000..62d77a6
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderWithPullReplicasTest.java
@@ -0,0 +1,251 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.SolrTestCaseJ4.SuppressObjectReleaseTracker;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.util.TestInjection;
+import org.apache.solr.util.TimeOut;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Slow
+@SuppressObjectReleaseTracker(bugUrl="Testing purposes")
+public class ChaosMonkeySafeLeaderWithPullReplicasTest extends AbstractFullDistribZkTestBase {
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+ private static final Integer RUN_LENGTH = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.runlength", "-1"));
+
+ private final boolean useTlogReplicas = random().nextBoolean();
+
+ private final int numPullReplicas;
+ private final int numRealtimeOrTlogReplicas;
+
+ @Override
+ protected int getPullReplicaCount() {
+ return numPullReplicas;
+ }
+
+ @Override
+ protected boolean useTlogReplicas() {
+ return useTlogReplicas;
+ }
+
+ @BeforeClass
+ public static void beforeSuperClass() {
+ schemaString = "schema15.xml"; // we need a string id
+ System.setProperty("solr.autoCommit.maxTime", "15000");
+ TestInjection.waitForReplicasInSync = null;
+ setErrorHook();
+ }
+
+ @AfterClass
+ public static void afterSuperClass() {
+ System.clearProperty("solr.autoCommit.maxTime");
+ clearErrorHook();
+ TestInjection.reset();
+ }
+
+ protected static final String[] fieldNames = new String[]{"f_i", "f_f", "f_d", "f_l", "f_dt"};
+ protected static final RandVal[] randVals = new RandVal[]{rint, rfloat, rdouble, rlong, rdate};
+
+ public String[] getFieldNames() {
+ return fieldNames;
+ }
+
+ public RandVal[] getRandValues() {
+ return randVals;
+ }
+
+ @Override
+ public void distribSetUp() throws Exception {
+ useFactory("solr.StandardDirectoryFactory");
+ super.distribSetUp();
+ }
+
+ public ChaosMonkeySafeLeaderWithPullReplicasTest() {
+ super();
+ numPullReplicas = random().nextInt(TEST_NIGHTLY ? 3 : 2) + 1;
+ numRealtimeOrTlogReplicas = random().nextInt(TEST_NIGHTLY ? 3 : 2) + 1;
+ sliceCount = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.slicecount", "-1"));
+ if (sliceCount == -1) {
+ sliceCount = random().nextInt(TEST_NIGHTLY ? 3 : 2) + 1;
+ }
+
+ int numNodes = sliceCount * (numRealtimeOrTlogReplicas + numPullReplicas);
+ fixShardCount(numNodes);
+ log.info("Starting ChaosMonkey test with {} shards and {} nodes", sliceCount, numNodes);
+ }
+
+ @Test
+ public void test() throws Exception {
+ DocCollection docCollection = cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
+ assertEquals(this.sliceCount, docCollection.getSlices().size());
+ Slice s = docCollection.getSlice("shard1");
+ assertNotNull(s);
+ assertEquals("Unexpected number of replicas. Collection: " + docCollection, numRealtimeOrTlogReplicas + numPullReplicas, s.getReplicas().size());
+ assertEquals("Unexpected number of pull replicas. Collection: " + docCollection, numPullReplicas, s.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
+ assertEquals(useTlogReplicas()?0:numRealtimeOrTlogReplicas, s.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
+ assertEquals(useTlogReplicas()?numRealtimeOrTlogReplicas:0, s.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
+ handle.clear();
+ handle.put("timestamp", SKIPVAL);
+
+ // randomly turn on 1 second 'soft' commit
+ randomlyEnableAutoSoftCommit();
+
+ tryDelete();
+
+ List<StoppableThread> threads = new ArrayList<>();
+ int threadCount = 2;
+ int batchSize = 1;
+ if (random().nextBoolean()) {
+ batchSize = random().nextInt(98) + 2;
+ }
+
+ boolean pauseBetweenUpdates = TEST_NIGHTLY ? random().nextBoolean() : true;
+ int maxUpdates = -1;
+ if (!pauseBetweenUpdates) {
+ maxUpdates = 1000 + random().nextInt(1000);
+ } else {
+ maxUpdates = 15000;
+ }
+
+ for (int i = 0; i < threadCount; i++) {
+ StoppableIndexingThread indexThread = new StoppableIndexingThread(controlClient, cloudClient, Integer.toString(i), true, maxUpdates, batchSize, pauseBetweenUpdates); // random().nextInt(999) + 1
+ threads.add(indexThread);
+ indexThread.start();
+ }
+
+ StoppableCommitThread commitThread = new StoppableCommitThread(cloudClient, 1000, false);
+ threads.add(commitThread);
+ commitThread.start();
+
+ chaosMonkey.startTheMonkey(false, 500);
+ try {
+ long runLength;
+ if (RUN_LENGTH != -1) {
+ runLength = RUN_LENGTH;
+ } else {
+ int[] runTimes;
+ if (TEST_NIGHTLY) {
+ runTimes = new int[] {5000, 6000, 10000, 15000, 25000, 30000,
+ 30000, 45000, 90000, 120000};
+ } else {
+ runTimes = new int[] {5000, 7000, 15000};
+ }
+ runLength = runTimes[random().nextInt(runTimes.length - 1)];
+ }
+
+ ChaosMonkey.wait(runLength, DEFAULT_COLLECTION, cloudClient.getZkStateReader());
+ } finally {
+ chaosMonkey.stopTheMonkey();
+ }
+
+ for (StoppableThread thread : threads) {
+ thread.safeStop();
+ }
+
+ // wait for stop...
+ for (StoppableThread thread : threads) {
+ thread.join();
+ }
+
+ for (StoppableThread thread : threads) {
+ if (thread instanceof StoppableIndexingThread) {
+ assertEquals(0, ((StoppableIndexingThread)thread).getFailCount());
+ }
+ }
+
+ // try and wait for any replications and what not to finish...
+
+ Thread.sleep(2000);
+
+ waitForThingsToLevelOut(180000);
+
+ // even if things were leveled out, a jetty may have just been stopped or something
+ // we wait again and wait to level out again to make sure the system is not still in flux
+
+ Thread.sleep(3000);
+
+ waitForThingsToLevelOut(180000);
+
+ log.info("control docs:" + controlClient.query(new SolrQuery("*:*")).getResults().getNumFound() + "\n\n");
+
+ waitForReplicationFromReplicas(DEFAULT_COLLECTION, cloudClient.getZkStateReader(), new TimeOut(30, TimeUnit.SECONDS));
+
+ checkShardConsistency(batchSize == 1, true);
+
+ // try and make a collection to make sure the overseer has survived the expiration and session loss
+
+ // sometimes we restart zookeeper as well
+ if (random().nextBoolean()) {
+ zkServer.shutdown();
+ zkServer = new ZkTestServer(zkServer.getZkDir(), zkServer.getPort());
+ zkServer.run();
+ }
+
+ try (CloudSolrClient client = createCloudClient("collection1")) {
+ createCollection(null, "testcollection", 1, 1, 100, client, null, "conf1");
+
+ }
+ List<Integer> numShardsNumReplicas = new ArrayList<>(2);
+ numShardsNumReplicas.add(1);
+ numShardsNumReplicas.add(1 + getPullReplicaCount());
+ checkForCollection("testcollection",numShardsNumReplicas, null);
+ }
+
+ private void tryDelete() throws Exception {
+ long start = System.nanoTime();
+ long timeout = start + TimeUnit.NANOSECONDS.convert(10, TimeUnit.SECONDS);
+ while (System.nanoTime() < timeout) {
+ try {
+ del("*:*");
+ break;
+ } catch (SolrServerException e) {
+ // cluster may not be up yet
+ e.printStackTrace();
+ }
+ Thread.sleep(100);
+ }
+ }
+
+ // skip the randoms - they can deadlock...
+ @Override
+ protected void indexr(Object... fields) throws Exception {
+ SolrInputDocument doc = new SolrInputDocument();
+ addFields(doc, fields);
+ addFields(doc, "rnd_b", true);
+ indexDoc(doc);
+ }
+
+}
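
For readers unfamiliar with these tests, the pattern exercised above can be sketched roughly as follows: a collection is created with explicit per-type replica counts and the resulting replicas are filtered by Replica.Type. This is an illustrative fragment in the style of SolrCloudTestCase, not part of the patch; the class name, collection name, config name and counts are made up, and it assumes the createCollection overload keeps the (shards, NRT, TLOG, PULL) parameter order used by the pre-rename tests in this change.

    import java.util.EnumSet;

    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.cloud.SolrCloudTestCase;
    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Replica;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class PullReplicaUsageSketch extends SolrCloudTestCase {

      @BeforeClass
      public static void setupCluster() throws Exception {
        // Small cluster with a minimal configset, mirroring the tests in this patch
        configureCluster(2)
            .addConfig("conf", configset("cloud-minimal"))
            .configure();
      }

      @Test
      public void testReplicaTypeCounts() throws Exception {
        // 2 shards; per shard 1 NRT, 1 TLOG and 1 PULL replica (illustrative counts)
        CollectionAdminRequest
            .createCollection("sketch_collection", "conf", 2, 1, 1, 1)
            .setMaxShardsPerNode(100)
            .process(cluster.getSolrClient());
        waitForState("Expected 2 shards with 3 replicas each", "sketch_collection",
            clusterShape(2, 3));

        // Replicas can be filtered by the renamed types
        DocCollection coll = getCollectionState("sketch_collection");
        assertEquals(2, coll.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
        assertEquals(2, coll.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
        assertEquals(2, coll.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
      }
    }
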
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
index e75a854..ea8598b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
@@ -286,7 +286,7 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
// first we make a core with the core name the collections api
// will try and use - this will cause our mock fail
Create createCmd = new Create();
- createCmd.setCoreName(Assign.buildCoreName("halfcollection", "shard1", Replica.Type.REALTIME, 1));
+ createCmd.setCoreName(Assign.buildCoreName("halfcollection", "shard1", Replica.Type.NRT, 1));
createCmd.setCollection("halfcollectionblocker");
String dataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(dataDir);
@@ -298,7 +298,7 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
}
createCmd = new Create();
- createCmd.setCoreName(Assign.buildCoreName("halfcollection", "shard1", Replica.Type.REALTIME, 1));
+ createCmd.setCoreName(Assign.buildCoreName("halfcollection", "shard1", Replica.Type.NRT, 1));
createCmd.setCollection("halfcollectionblocker2");
dataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(dataDir);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
index 2a2da78..643660b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
@@ -67,7 +67,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
Map<String, NamedList<Integer>> coresStatus = response.getCollectionCoresStatus();
assertEquals(4, coresStatus.size());
for (int i=0; i<4; i++) {
- NamedList<Integer> status = coresStatus.get(Assign.buildCoreName(collectionName, "shard" + (i/2+1), Replica.Type.REALTIME, (i%2+1)));
+ NamedList<Integer> status = coresStatus.get(Assign.buildCoreName(collectionName, "shard" + (i/2+1), Replica.Type.NRT, (i%2+1)));
assertEquals(0, (int)status.get("status"));
assertTrue(status.get("QTime") > 0);
}
@@ -136,9 +136,9 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
assertTrue(response.isSuccess());
coresStatus = response.getCollectionCoresStatus();
assertEquals(3, coresStatus.size());
- assertEquals(0, (int) coresStatus.get(Assign.buildCoreName(collectionName, "shardC", Replica.Type.REALTIME, 1)).get("status"));
- assertEquals(0, (int) coresStatus.get(Assign.buildCoreName(collectionName, "shardC", Replica.Type.APPEND, 1)).get("status"));
- assertEquals(0, (int) coresStatus.get(Assign.buildCoreName(collectionName, "shardC", Replica.Type.PASSIVE, 1)).get("status"));
+ assertEquals(0, (int) coresStatus.get(Assign.buildCoreName(collectionName, "shardC", Replica.Type.NRT, 1)).get("status"));
+ assertEquals(0, (int) coresStatus.get(Assign.buildCoreName(collectionName, "shardC", Replica.Type.TLOG, 1)).get("status"));
+ assertEquals(0, (int) coresStatus.get(Assign.buildCoreName(collectionName, "shardC", Replica.Type.PULL, 1)).get("status"));
response = CollectionAdminRequest.deleteShard(collectionName, "shardC").process(cluster.getSolrClient());
@@ -176,8 +176,8 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
Map<String, NamedList<Integer>> coresStatus = response.getCollectionCoresStatus();
- assertEquals(0, (int) coresStatus.get(Assign.buildCoreName(collectionName, "shard1_0" , Replica.Type.REALTIME, 1)).get("status"));
- assertEquals(0, (int) coresStatus.get(Assign.buildCoreName(collectionName, "shard1_1" , Replica.Type.REALTIME, 1)).get("status"));
+ assertEquals(0, (int) coresStatus.get(Assign.buildCoreName(collectionName, "shard1_0" , Replica.Type.NRT, 1)).get("status"));
+ assertEquals(0, (int) coresStatus.get(Assign.buildCoreName(collectionName, "shard1_1" , Replica.Type.NRT, 1)).get("status"));
waitForState("Expected all shards to be active and parent shard to be removed", collectionName, (n, c) -> {
if (c.getSlice("shard1").getState() == Slice.State.ACTIVE)
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java b/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
index db9ecb4..8f35c88 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
@@ -58,7 +58,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
private final boolean onlyLeaderIndexes = random().nextBoolean();
@Override
- protected boolean useAppendReplicas() {
+ protected boolean useTlogReplicas() {
return onlyLeaderIndexes;
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
index 8406791..2cc1c30 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
@@ -85,7 +85,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
}
@Override
- protected boolean useAppendReplicas() {
+ protected boolean useTlogReplicas() {
return onlyLeaderIndexes;
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java
index 1990919..f3965ac 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java
@@ -46,7 +46,7 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest
}
@Override
- protected boolean useAppendReplicas() {
+ protected boolean useTlogReplicas() {
return onlyLeaderIndexes;
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
index 340adbb..91da2c1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
@@ -347,7 +347,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
assertEquals(numberOfSlices * numberOfReplica, coreNames.size());
for (int i = 1; i <= numberOfSlices; i++) {
for (int j = 1; j <= numberOfReplica; j++) {
- String coreName = Assign.buildCoreName(COLLECTION_NAME, "shard" + i, Replica.Type.REALTIME, j);
+ String coreName = Assign.buildCoreName(COLLECTION_NAME, "shard" + i, Replica.Type.NRT, j);
assertTrue("Shard " + coreName + " was not created",
coreNames.contains(coreName));
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/RecoveryAfterSoftCommitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/RecoveryAfterSoftCommitTest.java b/solr/core/src/test/org/apache/solr/cloud/RecoveryAfterSoftCommitTest.java
index eabd9b0..8290e12 100644
--- a/solr/core/src/test/org/apache/solr/cloud/RecoveryAfterSoftCommitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/RecoveryAfterSoftCommitTest.java
@@ -40,7 +40,7 @@ public class RecoveryAfterSoftCommitTest extends AbstractFullDistribZkTestBase {
}
@Override
- protected boolean useAppendReplicas() {
+ protected boolean useTlogReplicas() {
return onlyLeaderIndexes;
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
index e00ea0d..73a0bf7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
@@ -81,7 +81,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
}
@Override
- protected boolean useAppendReplicas() {
+ protected boolean useTlogReplicas() {
return false;
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/TestAppendReplica.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestAppendReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestAppendReplica.java
deleted file mode 100644
index a0cc1f5..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestAppendReplica.java
+++ /dev/null
@@ -1,829 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import org.apache.http.client.HttpClient;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.CollectionStatePredicate;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.update.DirectUpdateHandler2;
-import org.apache.solr.update.SolrIndexWriter;
-import org.apache.solr.update.UpdateHandler;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.util.RefCounted;
-import org.apache.solr.util.TestInjection;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.carrotsearch.randomizedtesting.annotations.Repeat;
-
-@Slow
-public class TestAppendReplica extends SolrCloudTestCase {
-
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
- private String collectionName = null;
- private final static int REPLICATION_TIMEOUT_SECS = 10;
-
- private String suggestedCollectionName() {
- return (getTestClass().getSimpleName().replace("Test", "") + "_" + getTestName().split(" ")[0]).replaceAll("(.)(\\p{Upper})", "$1_$2").toLowerCase(Locale.ROOT);
- }
-
- @BeforeClass
- public static void setupCluster() throws Exception {
- TestInjection.waitForReplicasInSync = null; // We'll be explicit about this in this test
- configureCluster(2) // 2 + random().nextInt(3)
- .addConfig("conf", configset("cloud-minimal-inplace-updates"))
- .configure();
- Boolean useLegacyCloud = rarely();
- LOG.info("Using legacyCloud?: {}", useLegacyCloud);
- CollectionAdminRequest.ClusterProp clusterPropRequest = CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, String.valueOf(useLegacyCloud));
- CollectionAdminResponse response = clusterPropRequest.process(cluster.getSolrClient());
- assertEquals(0, response.getStatus());
- }
-
- @AfterClass
- public static void tearDownCluster() {
- TestInjection.reset();
- }
-
- @Override
- public void setUp() throws Exception {
- super.setUp();
- collectionName = suggestedCollectionName();
- expectThrows(SolrException.class, () -> getCollectionState(collectionName));
- }
-
- @Override
- public void tearDown() throws Exception {
- for (JettySolrRunner jetty:cluster.getJettySolrRunners()) {
- if (!jetty.isRunning()) {
- LOG.warn("Jetty {} not running, probably some bad test. Starting it", jetty.getLocalPort());
- ChaosMonkey.start(jetty);
- }
- }
- if (cluster.getSolrClient().getZkStateReader().getClusterState().getCollectionOrNull(collectionName) != null) {
- LOG.info("tearDown deleting collection");
- CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
- waitForDeletion(collectionName);
- }
- super.tearDown();
- }
-
- /**
- * Asserts that Update logs exist for replicas of type {@link org.apache.solr.common.cloud.Replica.Type#REALTIME}, but not
- * for replicas of type {@link org.apache.solr.common.cloud.Replica.Type#PASSIVE}
- */
- private void assertUlogPresence(DocCollection collection) {
- for (Slice s:collection.getSlices()) {
- for (Replica r:s.getReplicas()) {
- SolrCore core = null;
- try {
- core = cluster.getReplicaJetty(r).getCoreContainer().getCore(r.getCoreName());
- assertNotNull(core);
- assertTrue("Update log should exist for replicas of type Append",
- new java.io.File(core.getUlogDir()).exists());
- } finally {
- core.close();
- }
- }
- }
- }
-
- @Repeat(iterations=2) // 2 times to make sure cleanup is complete and we can create the same collection
- public void testCreateDelete() throws Exception {
- try {
- CollectionAdminRequest.createCollection(collectionName, "conf", 2, 0, 4, 0)
- .setMaxShardsPerNode(100)
- .process(cluster.getSolrClient());
- DocCollection docCollection = getCollectionState(collectionName);
- assertNotNull(docCollection);
- assertEquals("Expecting 2 shards",
- 2, docCollection.getSlices().size());
- assertEquals("Expecting 4 relpicas per shard",
- 8, docCollection.getReplicas().size());
- assertEquals("Expecting 8 append replicas, 4 per shard",
- 8, docCollection.getReplicas(EnumSet.of(Replica.Type.APPEND)).size());
- assertEquals("Expecting no realtime replicas",
- 0, docCollection.getReplicas(EnumSet.of(Replica.Type.REALTIME)).size());
- assertEquals("Expecting no passive replicas",
- 0, docCollection.getReplicas(EnumSet.of(Replica.Type.PASSIVE)).size());
- for (Slice s:docCollection.getSlices()) {
- assertTrue(s.getLeader().getType() == Replica.Type.APPEND);
- List<String> shardElectionNodes = cluster.getZkClient().getChildren(ZkStateReader.getShardLeadersElectPath(collectionName, s.getName()), null, true);
- assertEquals("Unexpected election nodes for Shard: " + s.getName() + ": " + Arrays.toString(shardElectionNodes.toArray()),
- 4, shardElectionNodes.size());
- }
- assertUlogPresence(docCollection);
- } finally {
- zkClient().printLayoutToStdOut();
- }
- }
-
- @SuppressWarnings("unchecked")
- public void testAddDocs() throws Exception {
- int numAppendReplicas = 1 + random().nextInt(3);
- DocCollection docCollection = createAndWaitForCollection(1, 0, numAppendReplicas, 0);
- assertEquals(1, docCollection.getSlices().size());
-
- cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "1", "foo", "bar"));
- cluster.getSolrClient().commit(collectionName);
-
- Slice s = docCollection.getSlices().iterator().next();
- try (HttpSolrClient leaderClient = getHttpSolrClient(s.getLeader().getCoreUrl())) {
- assertEquals(1, leaderClient.query(new SolrQuery("*:*")).getResults().getNumFound());
- }
-
- TimeOut t = new TimeOut(REPLICATION_TIMEOUT_SECS, TimeUnit.SECONDS);
- for (Replica r:s.getReplicas(EnumSet.of(Replica.Type.APPEND))) {
- //TODO: assert replication < REPLICATION_TIMEOUT_SECS
- try (HttpSolrClient appendReplicaClient = getHttpSolrClient(r.getCoreUrl())) {
- while (true) {
- try {
- assertEquals("Replica " + r.getName() + " not up to date after 10 seconds",
- 1, appendReplicaClient.query(new SolrQuery("*:*")).getResults().getNumFound());
- // Append replicas process all updates
- SolrQuery req = new SolrQuery(
- "qt", "/admin/plugins",
- "stats", "true");
- QueryResponse statsResponse = appendReplicaClient.query(req);
- assertEquals("Append replicas should recive all updates. Replica: " + r + ", response: " + statsResponse,
- 1L, ((Map<String, Object>)((NamedList<Object>)statsResponse.getResponse()).findRecursive("plugins", "UPDATE", "updateHandler", "stats")).get("UPDATE.updateHandler.cumulativeAdds.count"));
- break;
- } catch (AssertionError e) {
- if (t.hasTimedOut()) {
- throw e;
- } else {
- Thread.sleep(100);
- }
- }
- }
- }
- }
- assertUlogPresence(docCollection);
- }
-
- public void testAddRemoveAppendReplica() throws Exception {
- DocCollection docCollection = createAndWaitForCollection(2, 0, 1, 0);
- assertEquals(2, docCollection.getSlices().size());
-
- CollectionAdminRequest.addReplicaToShard(collectionName, "shard1", Replica.Type.APPEND).process(cluster.getSolrClient());
- docCollection = assertNumberOfReplicas(0, 3, 0, true, false);
- CollectionAdminRequest.addReplicaToShard(collectionName, "shard2", Replica.Type.APPEND).process(cluster.getSolrClient());
- docCollection = assertNumberOfReplicas(0, 4, 0, true, false);
-
- waitForState("Expecting collection to have 2 shards and 2 replica each", collectionName, clusterShape(2, 2));
-
- //Delete passive replica from shard1
- CollectionAdminRequest.deleteReplica(
- collectionName,
- "shard1",
- docCollection.getSlice("shard1").getReplicas(EnumSet.of(Replica.Type.APPEND)).get(0).getName())
- .process(cluster.getSolrClient());
- assertNumberOfReplicas(0, 3, 0, true, true);
- }
-
- public void testRemoveLeader() throws Exception {
- doReplaceLeader(true);
- }
-
- public void testKillLeader() throws Exception {
- doReplaceLeader(false);
- }
-
- public void testRealTimeGet() throws SolrServerException, IOException, KeeperException, InterruptedException {
- // should be redirected to Replica.Type.REALTIME
- int numReplicas = random().nextBoolean()?1:2;
- int numRealtimeReplicas = random().nextBoolean()?0:2;
- CollectionAdminRequest.createCollection(collectionName, "conf", 1, numRealtimeReplicas, numReplicas, 0)
- .setMaxShardsPerNode(100)
- .process(cluster.getSolrClient());
- waitForState("Unexpected replica count", collectionName, activeReplicaCount(numRealtimeReplicas, numReplicas, 0));
- DocCollection docCollection = assertNumberOfReplicas(numRealtimeReplicas, numReplicas, 0, false, true);
- HttpClient httpClient = cluster.getSolrClient().getHttpClient();
- int id = 0;
- Slice slice = docCollection.getSlice("shard1");
- List<String> ids = new ArrayList<>(slice.getReplicas().size());
- for (Replica rAdd:slice.getReplicas()) {
- try (HttpSolrClient client = getHttpSolrClient(rAdd.getCoreUrl(), httpClient)) {
- client.add(new SolrInputDocument("id", String.valueOf(id), "foo_s", "bar"));
- }
- SolrDocument docCloudClient = cluster.getSolrClient().getById(collectionName, String.valueOf(id));
- assertEquals("bar", docCloudClient.getFieldValue("foo_s"));
- for (Replica rGet:slice.getReplicas()) {
- try (HttpSolrClient client = getHttpSolrClient(rGet.getCoreUrl(), httpClient)) {
- SolrDocument doc = client.getById(String.valueOf(id));
- assertEquals("bar", doc.getFieldValue("foo_s"));
- }
- }
- ids.add(String.valueOf(id));
- id++;
- }
- SolrDocumentList previousAllIdsResult = null;
- for (Replica rAdd:slice.getReplicas()) {
- try (HttpSolrClient client = getHttpSolrClient(rAdd.getCoreUrl(), httpClient)) {
- SolrDocumentList allIdsResult = client.getById(ids);
- if (previousAllIdsResult != null) {
- assertTrue(compareSolrDocumentList(previousAllIdsResult, allIdsResult));
- } else {
- // set the first response here
- previousAllIdsResult = allIdsResult;
- assertEquals("Unexpected number of documents", ids.size(), allIdsResult.getNumFound());
- }
- }
- id++;
- }
- }
-
- /*
- * validate leader election and that replication still happens on a new leader
- */
- private void doReplaceLeader(boolean removeReplica) throws Exception {
- DocCollection docCollection = createAndWaitForCollection(1, 0, 2, 0);
-
- // Add a document and commit
- cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "1", "foo", "bar"));
- cluster.getSolrClient().commit(collectionName);
- Slice s = docCollection.getSlices().iterator().next();
- try (HttpSolrClient leaderClient = getHttpSolrClient(s.getLeader().getCoreUrl())) {
- assertEquals(1, leaderClient.query(new SolrQuery("*:*")).getResults().getNumFound());
- }
-
- waitForNumDocsInAllReplicas(1, docCollection.getReplicas(EnumSet.of(Replica.Type.APPEND)), REPLICATION_TIMEOUT_SECS);
-
- // Delete leader replica from shard1
- JettySolrRunner leaderJetty = null;
- if (removeReplica) {
- CollectionAdminRequest.deleteReplica(
- collectionName,
- "shard1",
- s.getLeader().getName())
- .process(cluster.getSolrClient());
- } else {
- leaderJetty = cluster.getReplicaJetty(s.getLeader());
- ChaosMonkey.kill(leaderJetty);
- waitForState("Leader replica not removed", collectionName, clusterShape(1, 1));
- // Wait for cluster state to be updated
- waitForState("Replica state not updated in cluster state",
- collectionName, clusterStateReflectsActiveAndDownReplicas());
- }
- docCollection = assertNumberOfReplicas(0, 1, 0, true, true);
-
- // Wait until a new leader is elected
- TimeOut t = new TimeOut(30, TimeUnit.SECONDS);
- while (!t.hasTimedOut()) {
- docCollection = getCollectionState(collectionName);
- Replica leader = docCollection.getSlice("shard1").getLeader();
- if (leader != null && leader.isActive(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes())) {
- break;
- }
- Thread.sleep(500);
- }
- assertFalse("Timeout waiting for a new leader to be elected", t.hasTimedOut());
-
- // There is a new leader, I should be able to add and commit
- cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "2", "foo", "zoo"));
- cluster.getSolrClient().commit(collectionName);
-
- // Queries should still work
- waitForNumDocsInAllReplicas(2, docCollection.getReplicas(EnumSet.of(Replica.Type.APPEND)), REPLICATION_TIMEOUT_SECS);
- // Start back the node
- if (removeReplica) {
- CollectionAdminRequest.addReplicaToShard(collectionName, "shard1", Replica.Type.APPEND).process(cluster.getSolrClient());
- } else {
- ChaosMonkey.start(leaderJetty);
- }
- waitForState("Expected collection to be 1x2", collectionName, clusterShape(1, 2));
- // added replica should replicate from the leader
- waitForNumDocsInAllReplicas(2, docCollection.getReplicas(EnumSet.of(Replica.Type.APPEND)), REPLICATION_TIMEOUT_SECS);
- }
-
- public void testKillAppendReplica() throws Exception {
- DocCollection docCollection = createAndWaitForCollection(1, 0, 2, 0);
-
- waitForNumDocsInAllActiveReplicas(0);
- cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "1", "foo", "bar"));
- cluster.getSolrClient().commit(collectionName);
- waitForNumDocsInAllActiveReplicas(1);
-
- JettySolrRunner passiveReplicaJetty = cluster.getReplicaJetty(docCollection.getSlice("shard1").getReplicas(EnumSet.of(Replica.Type.APPEND)).get(0));
- ChaosMonkey.kill(passiveReplicaJetty);
- waitForState("Replica not removed", collectionName, activeReplicaCount(0, 1, 0));
- // Also wait for the replica to be placed in state="down"
- waitForState("Didn't update state", collectionName, clusterStateReflectsActiveAndDownReplicas());
-
- cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "2", "foo", "bar"));
- cluster.getSolrClient().commit(collectionName);
- waitForNumDocsInAllActiveReplicas(2);
-
- ChaosMonkey.start(passiveReplicaJetty);
- waitForState("Replica not added", collectionName, activeReplicaCount(0, 2, 0));
- waitForNumDocsInAllActiveReplicas(2);
- }
-
- public void testSearchWhileReplicationHappens() {
-
- }
-
- public void testReplication() {
- // Validate incremental replication
- }
-
- public void testOnlyLeaderIndexes() throws Exception {
- createAndWaitForCollection(1, 0, 2, 0);
-
- CloudSolrClient cloudClient = cluster.getSolrClient();
- new UpdateRequest()
- .add(sdoc("id", "1"))
- .add(sdoc("id", "2"))
- .add(sdoc("id", "3"))
- .add(sdoc("id", "4"))
- .process(cloudClient, collectionName);
-
- {
- UpdateHandler updateHandler = getSolrCore(true).get(0).getUpdateHandler();
- RefCounted<IndexWriter> iwRef = updateHandler.getSolrCoreState().getIndexWriter(null);
- assertTrue("IndexWriter at leader must see updates ", iwRef.get().hasUncommittedChanges());
- iwRef.decref();
- }
-
- for (SolrCore solrCore : getSolrCore(false)) {
- RefCounted<IndexWriter> iwRef = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
- assertFalse("IndexWriter at replicas must not see updates ", iwRef.get().hasUncommittedChanges());
- iwRef.decref();
- }
-
- checkRTG(1, 4, cluster.getJettySolrRunners());
-
- new UpdateRequest()
- .deleteById("1")
- .deleteByQuery("id:2")
- .process(cloudClient, collectionName);
-
- // The DBQ is not processed at replicas, so we still can get doc2 and other docs by RTG
- checkRTG(2,4, getSolrRunner(false));
-
- new UpdateRequest()
- .commit(cloudClient, collectionName);
-
- waitForNumDocsInAllActiveReplicas(2);
-
- // Update log roll over
- for (SolrCore solrCore : getSolrCore(false)) {
- UpdateLog updateLog = solrCore.getUpdateHandler().getUpdateLog();
- assertFalse(updateLog.hasUncommittedChanges());
- }
-
- // UpdateLog copy over old updates
- for (int i = 15; i <= 150; i++) {
- cloudClient.add(collectionName, sdoc("id",String.valueOf(i)));
- if (random().nextInt(100) < 15 & i != 150) {
- cloudClient.commit(collectionName);
- }
- }
- checkRTG(120,150, cluster.getJettySolrRunners());
- waitForReplicasCatchUp(20);
- }
-
- public void testRecovery() throws Exception {
- boolean useKill = random().nextBoolean();
- createAndWaitForCollection(1, 0, 2, 0);
-
- CloudSolrClient cloudClient = cluster.getSolrClient();
- new UpdateRequest()
- .add(sdoc("id", "3"))
- .add(sdoc("id", "4"))
- .commit(cloudClient, collectionName);
- // Replica recovery
- new UpdateRequest()
- .add(sdoc("id", "5"))
- .process(cloudClient, collectionName);
- JettySolrRunner solrRunner = getSolrRunner(false).get(0);
- if (useKill) {
- ChaosMonkey.kill(solrRunner);
- } else {
- ChaosMonkey.stop(solrRunner);
- }
- new UpdateRequest()
- .add(sdoc("id", "6"))
- .process(cloudClient, collectionName);
- ChaosMonkey.start(solrRunner);
- waitForState("Replica didn't recover", collectionName, activeReplicaCount(0,2,0));
- // We skip peerSync, so replica will always trigger commit on leader
- waitForNumDocsInAllActiveReplicas(4);
-
- // If I add the doc immediately, the leader fails to communicate with the follower with broken pipe. Related to SOLR-9555 I believe
- //nocommit
- Thread.sleep(10000);
-
- // More Replica recovery testing
- new UpdateRequest()
- .add(sdoc("id", "7"))
- .process(cloudClient, collectionName);
- checkRTG(3,7, cluster.getJettySolrRunners());
- DirectUpdateHandler2.commitOnClose = false;
- ChaosMonkey.stop(solrRunner);
- DirectUpdateHandler2.commitOnClose = true;
- ChaosMonkey.start(solrRunner);
- waitForState("Replica didn't recover", collectionName, activeReplicaCount(0,2,0));
- checkRTG(3,7, cluster.getJettySolrRunners());
- waitForNumDocsInAllActiveReplicas(5, 0);
-
- // Test replica recovery apply buffer updates
- Semaphore waitingForBufferUpdates = new Semaphore(0);
- Semaphore waitingForReplay = new Semaphore(0);
- RecoveryStrategy.testing_beforeReplayBufferingUpdates = () -> {
- try {
- waitingForReplay.release();
- waitingForBufferUpdates.acquire();
- } catch (InterruptedException e) {
- e.printStackTrace();
- fail("Test interrupted: " + e.getMessage());
- }
- };
- if (useKill) {
- ChaosMonkey.kill(solrRunner);
- } else {
- ChaosMonkey.stop(solrRunner);
- }
- ChaosMonkey.start(solrRunner);
- waitingForReplay.acquire();
- new UpdateRequest()
- .add(sdoc("id", "8"))
- .add(sdoc("id", "9"))
- .process(cloudClient, collectionName);
- waitingForBufferUpdates.release();
- RecoveryStrategy.testing_beforeReplayBufferingUpdates = null;
- waitForState("Replica didn't recover", collectionName, activeReplicaCount(0,2,0));
- checkRTG(3,9, cluster.getJettySolrRunners());
- waitForNumDocsInAllActiveReplicas(5, 0);
- for (SolrCore solrCore : getSolrCore(false)) {
- RefCounted<IndexWriter> iwRef = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
- assertFalse("IndexWriter at replicas must not see updates ", iwRef.get().hasUncommittedChanges());
- iwRef.decref();
- }
- }
-
- public void testDeleteById() throws Exception{
- createAndWaitForCollection(1,0,2,0);
- CloudSolrClient cloudClient = cluster.getSolrClient();
- new UpdateRequest()
- .deleteByQuery("*:*")
- .commit(cluster.getSolrClient(), collectionName);
- new UpdateRequest()
- .add(sdoc("id", "1"))
- .commit(cloudClient, collectionName);
- waitForNumDocsInAllActiveReplicas(1);
- new UpdateRequest()
- .deleteById("1")
- .process(cloudClient, collectionName);
- boolean successs = false;
- try {
- checkRTG(1, 1, cluster.getJettySolrRunners());
- successs = true;
- } catch (AssertionError e) {
- //expected
- }
- assertFalse("Doc1 is deleted but it's still exist", successs);
- }
-
- public void testBasicLeaderElection() throws Exception {
- createAndWaitForCollection(1,0,2,0);
- CloudSolrClient cloudClient = cluster.getSolrClient();
- new UpdateRequest()
- .deleteByQuery("*:*")
- .commit(cluster.getSolrClient(), collectionName);
- new UpdateRequest()
- .add(sdoc("id", "1"))
- .add(sdoc("id", "2"))
- .process(cloudClient, collectionName);
- JettySolrRunner oldLeaderJetty = getSolrRunner(true).get(0);
- ChaosMonkey.kill(oldLeaderJetty);
- waitForState("Replica not removed", collectionName, activeReplicaCount(0, 1, 0));
- new UpdateRequest()
- .add(sdoc("id", "3"))
- .add(sdoc("id", "4"))
- .process(cloudClient, collectionName);
- ChaosMonkey.start(oldLeaderJetty);
- waitForState("Replica not added", collectionName, activeReplicaCount(0, 2, 0));
- checkRTG(1,4, cluster.getJettySolrRunners());
- new UpdateRequest()
- .commit(cloudClient, collectionName);
- waitForNumDocsInAllActiveReplicas(4, 0);
- }
-
- public void testOutOfOrderDBQWithInPlaceUpdates() throws Exception {
- createAndWaitForCollection(1,0,2,0);
- assertFalse(getSolrCore(true).get(0).getLatestSchema().getField("inplace_updatable_int").indexed());
- assertFalse(getSolrCore(true).get(0).getLatestSchema().getField("inplace_updatable_int").stored());
- assertTrue(getSolrCore(true).get(0).getLatestSchema().getField("inplace_updatable_int").hasDocValues());
- List<UpdateRequest> updates = new ArrayList<>();
- updates.add(simulatedUpdateRequest(null, "id", 1, "title_s", "title0_new", "inplace_updatable_int", 5, "_version_", 1L)); // full update
- updates.add(simulatedDBQ("inplace_updatable_int:5", 3L));
- updates.add(simulatedUpdateRequest(1L, "id", 1, "inplace_updatable_int", 6, "_version_", 2L));
- for (JettySolrRunner solrRunner: getSolrRunner(false)) {
- try (SolrClient client = solrRunner.newClient()) {
- for (UpdateRequest up : updates) {
- up.process(client, collectionName);
- }
- }
- }
- JettySolrRunner oldLeaderJetty = getSolrRunner(true).get(0);
- ChaosMonkey.kill(oldLeaderJetty);
- waitForState("Replica not removed", collectionName, activeReplicaCount(0, 1, 0));
- ChaosMonkey.start(oldLeaderJetty);
- waitForState("Replica not added", collectionName, activeReplicaCount(0, 2, 0));
- checkRTG(1,1, cluster.getJettySolrRunners());
- SolrDocument doc = cluster.getSolrClient().getById(collectionName,"1");
- assertNotNull(doc.get("title_s"));
- }
-
- private UpdateRequest simulatedUpdateRequest(Long prevVersion, Object... fields) throws SolrServerException, IOException {
- SolrInputDocument doc = sdoc(fields);
-
- // get baseUrl of the leader
- String baseUrl = getBaseUrl();
-
- UpdateRequest ur = new UpdateRequest();
- ur.add(doc);
- ur.setParam("update.distrib", "FROMLEADER");
- if (prevVersion != null) {
- ur.setParam("distrib.inplace.prevversion", String.valueOf(prevVersion));
- ur.setParam("distrib.inplace.update", "true");
- }
- ur.setParam("distrib.from", baseUrl);
- return ur;
- }
-
- private UpdateRequest simulatedDBQ(String query, long version) throws SolrServerException, IOException {
- String baseUrl = getBaseUrl();
-
- UpdateRequest ur = new UpdateRequest();
- ur.deleteByQuery(query);
- ur.setParam("_version_", ""+version);
- ur.setParam("update.distrib", "FROMLEADER");
- ur.setParam("distrib.from", baseUrl);
- return ur;
- }
-
- private String getBaseUrl() {
- DocCollection collection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(collectionName);
- Slice slice = collection.getSlice("shard1");
- return slice.getLeader().getCoreUrl();
- }
-
- private DocCollection createAndWaitForCollection(int numShards, int numRealtimeReplicas, int numAppendReplicas, int numPassiveReplicas) throws SolrServerException, IOException, KeeperException, InterruptedException {
- CollectionAdminRequest.createCollection(collectionName, "conf", numShards, numRealtimeReplicas, numAppendReplicas, numPassiveReplicas)
- .setMaxShardsPerNode(100)
- .process(cluster.getSolrClient());
- int numReplicasPerShard = numRealtimeReplicas + numAppendReplicas + numPassiveReplicas;
- cluster.getSolrClient().getZkStateReader().registerCore(collectionName); //TODO: Is this needed?
- waitForState("Expected collection to be created with " + numShards + " shards and " + numReplicasPerShard + " replicas",
- collectionName, clusterShape(numShards, numReplicasPerShard));
- return assertNumberOfReplicas(numRealtimeReplicas*numShards, numAppendReplicas*numShards, numPassiveReplicas*numShards, false, true);
- }
-
- private void waitForNumDocsInAllActiveReplicas(int numDocs) throws IOException, SolrServerException, InterruptedException {
- waitForNumDocsInAllActiveReplicas(numDocs, REPLICATION_TIMEOUT_SECS);
- }
-
- private void waitForNumDocsInAllActiveReplicas(int numDocs, int timeout) throws IOException, SolrServerException, InterruptedException {
- DocCollection docCollection = getCollectionState(collectionName);
- waitForNumDocsInAllReplicas(numDocs, docCollection.getReplicas().stream().filter(r -> r.getState() == Replica.State.ACTIVE).collect(Collectors.toList()), timeout);
- }
-
- private void waitForNumDocsInAllReplicas(int numDocs, Collection<Replica> replicas, int timeout) throws IOException, SolrServerException, InterruptedException {
- waitForNumDocsInAllReplicas(numDocs, replicas, "*:*", timeout);
- }
-
- private void waitForNumDocsInAllReplicas(int numDocs, Collection<Replica> replicas, String query, int timeout) throws IOException, SolrServerException, InterruptedException {
- TimeOut t = new TimeOut(timeout, TimeUnit.SECONDS);
- for (Replica r:replicas) {
- if (!r.isActive(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes())) {
- continue;
- }
- try (HttpSolrClient replicaClient = getHttpSolrClient(r.getCoreUrl())) {
- while (true) {
- try {
- assertEquals("Replica " + r.getName() + " not up to date after " + REPLICATION_TIMEOUT_SECS + " seconds",
- numDocs, replicaClient.query(new SolrQuery(query)).getResults().getNumFound());
- break;
- } catch (AssertionError e) {
- if (t.hasTimedOut()) {
- throw e;
- } else {
- Thread.sleep(100);
- }
- }
- }
- }
- }
- }
-
- private void waitForDeletion(String collection) throws InterruptedException, KeeperException {
- TimeOut t = new TimeOut(10, TimeUnit.SECONDS);
- while (cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
- try {
- Thread.sleep(100);
- if (t.hasTimedOut()) {
- fail("Timed out waiting for collection " + collection + " to be deleted.");
- }
- cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
- } catch(SolrException e) {
- return;
- }
-
- }
- }
-
- private DocCollection assertNumberOfReplicas(int numWriter, int numActive, int numPassive, boolean updateCollection, boolean activeOnly) throws KeeperException, InterruptedException {
- if (updateCollection) {
- cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
- }
- DocCollection docCollection = getCollectionState(collectionName);
- assertNotNull(docCollection);
- assertEquals("Unexpected number of writer replicas: " + docCollection, numWriter,
- docCollection.getReplicas(EnumSet.of(Replica.Type.REALTIME)).stream().filter(r->!activeOnly || r.getState() == Replica.State.ACTIVE).count());
- assertEquals("Unexpected number of passive replicas: " + docCollection, numPassive,
- docCollection.getReplicas(EnumSet.of(Replica.Type.PASSIVE)).stream().filter(r->!activeOnly || r.getState() == Replica.State.ACTIVE).count());
- assertEquals("Unexpected number of active replicas: " + docCollection, numActive,
- docCollection.getReplicas(EnumSet.of(Replica.Type.APPEND)).stream().filter(r->!activeOnly || r.getState() == Replica.State.ACTIVE).count());
- return docCollection;
- }
-
- /*
- * passes only if all replicas are active or down, and the "liveNodes" reflect the same status
- */
- private CollectionStatePredicate clusterStateReflectsActiveAndDownReplicas() {
- return (liveNodes, collectionState) -> {
- for (Replica r:collectionState.getReplicas()) {
- if (r.getState() != Replica.State.DOWN && r.getState() != Replica.State.ACTIVE) {
- return false;
- }
- if (r.getState() == Replica.State.DOWN && liveNodes.contains(r.getNodeName())) {
- return false;
- }
- if (r.getState() == Replica.State.ACTIVE && !liveNodes.contains(r.getNodeName())) {
- return false;
- }
- }
- return true;
- };
- }
-
-
- private CollectionStatePredicate activeReplicaCount(int numWriter, int numActive, int numPassive) {
- return (liveNodes, collectionState) -> {
- int writersFound = 0, activesFound = 0, passivesFound = 0;
- if (collectionState == null)
- return false;
- for (Slice slice : collectionState) {
- for (Replica replica : slice) {
- if (replica.isActive(liveNodes))
- switch (replica.getType()) {
- case APPEND:
- activesFound++;
- break;
- case PASSIVE:
- passivesFound++;
- break;
- case REALTIME:
- writersFound++;
- break;
- default:
- throw new AssertionError("Unexpected replica type");
- }
- }
- }
- return numWriter == writersFound && numActive == activesFound && numPassive == passivesFound;
- };
- }
-
- private List<SolrCore> getSolrCore(boolean isLeader) {
- List<SolrCore> rs = new ArrayList<>();
-
- CloudSolrClient cloudClient = cluster.getSolrClient();
- DocCollection docCollection = cloudClient.getZkStateReader().getClusterState().getCollection(collectionName);
-
- for (JettySolrRunner solrRunner : cluster.getJettySolrRunners()) {
- if (solrRunner.getCoreContainer() == null) continue;
- for (SolrCore solrCore : solrRunner.getCoreContainer().getCores()) {
- CloudDescriptor cloudDescriptor = solrCore.getCoreDescriptor().getCloudDescriptor();
- Slice slice = docCollection.getSlice(cloudDescriptor.getShardId());
- Replica replica = docCollection.getReplica(cloudDescriptor.getCoreNodeName());
- if (slice.getLeader().equals(replica) && isLeader) {
- rs.add(solrCore);
- } else if (!slice.getLeader().equals(replica) && !isLeader) {
- rs.add(solrCore);
- }
- }
- }
- return rs;
- }
-
- private void checkRTG(int from, int to, List<JettySolrRunner> solrRunners) throws Exception{
- for (JettySolrRunner solrRunner: solrRunners) {
- try (SolrClient client = solrRunner.newClient()) {
- for (int i = from; i <= to; i++) {
- SolrQuery query = new SolrQuery();
- query.set("distrib", false);
- query.setRequestHandler("/get");
- query.set("id",i);
- QueryResponse res = client.query(collectionName, query);
- assertNotNull("Can not find doc "+ i + " in " + solrRunner.getBaseUrl(),res.getResponse().get("doc"));
- }
- }
- }
- }
-
- private List<JettySolrRunner> getSolrRunner(boolean isLeader) {
- List<JettySolrRunner> rs = new ArrayList<>();
- CloudSolrClient cloudClient = cluster.getSolrClient();
- DocCollection docCollection = cloudClient.getZkStateReader().getClusterState().getCollection(collectionName);
- for (JettySolrRunner solrRunner : cluster.getJettySolrRunners()) {
- if (solrRunner.getCoreContainer() == null) continue;
- for (SolrCore solrCore : solrRunner.getCoreContainer().getCores()) {
- CloudDescriptor cloudDescriptor = solrCore.getCoreDescriptor().getCloudDescriptor();
- Slice slice = docCollection.getSlice(cloudDescriptor.getShardId());
- Replica replica = docCollection.getReplica(cloudDescriptor.getCoreNodeName());
- if (slice.getLeader() == replica && isLeader) {
- rs.add(solrRunner);
- } else if (slice.getLeader() != replica && !isLeader) {
- rs.add(solrRunner);
- }
- }
- }
- return rs;
- }
-
- private void waitForReplicasCatchUp(int numTry) throws IOException, InterruptedException {
- String leaderTimeCommit = getSolrCore(true).get(0).getDeletionPolicy().getLatestCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
- if (leaderTimeCommit == null) return;
- for (int i = 0; i < numTry; i++) {
- boolean inSync = true;
- for (SolrCore solrCore : getSolrCore(false)) {
- String replicateTimeCommit = solrCore.getDeletionPolicy().getLatestCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
- if (!leaderTimeCommit.equals(replicateTimeCommit)) {
- inSync = false;
- Thread.sleep(500);
- break;
- }
- }
- if (inSync) return;
- }
-
- fail("Some replicas are not in sync with leader");
-
- }
-}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
index 965c169..c7fc0e8 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
@@ -110,7 +110,7 @@ public class TestCloudRecovery extends SolrCloudTestCase {
assertEquals(4, resp.getResults().getNumFound());
 // Make sure all nodes recover from the tlog
if (onlyLeaderIndexes) {
- // Leader election can be kicked off, so 2 append replicas will replay its tlog before becoming new leader
+ // Leader election can be kicked off, so 2 tlog replicas will replay their tlogs before becoming the new leader
assertTrue( countReplayLog.get() >=2);
} else {
assertEquals(4, countReplayLog.get());
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9e82fd45/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
index 68509b8..74ad7bd 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
@@ -61,7 +61,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
public void test() throws Exception {
try (CloudSolrClient client = createCloudClient(null)) {
CollectionAdminRequest.Create req;
- if (useAppendReplicas()) {
+ if (useTlogReplicas()) {
req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1",2, 0, 1, 1);
} else {
req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1",2, 1, 0, 1);
@@ -177,7 +177,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
Map<String, Object> collection = (Map<String, Object>) collections.get(COLLECTION_NAME);
assertNotNull(collection);
assertEquals("conf1", collection.get("configName"));
-// assertEquals("1", collection.get("realtimeReplicas"));
+// assertEquals("1", collection.get("nrtReplicas"));
}
}