Posted to commits@solr.apache.org by th...@apache.org on 2021/05/08 14:00:32 UTC

[solr] branch main updated: SOLR-11904: Mark ReplicationHandler's polling thread as a Solr server thread so the PKI Interceptor is activated to allow PULL replicas to replicate from security-enabled leaders (#112)

This is an automated email from the ASF dual-hosted git repository.

thelabdude pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr.git


The following commit(s) were added to refs/heads/main by this push:
     new ae1ac22  SOLR-11904: Mark ReplicationHandler's polling thread as a Solr server thread so the PKI Interceptor is activated to allow PULL replicas to replicate from security-enabled leaders (#112)
ae1ac22 is described below

commit ae1ac22eb70d44f80e5c89241bdd0d3e4144e62a
Author: Timothy Potter <th...@gmail.com>
AuthorDate: Sat May 8 08:00:23 2021 -0600

    SOLR-11904: Mark ReplicationHandler's polling thread as a Solr server thread so the PKI Interceptor is activated to allow PULL replicas to replicate from security-enabled leaders (#112)
    
    * SOLR-11904: Mark ReplicationHandler's polling thread as a Solr server thread so the PKI Interceptor is activated to allow PULL replicas to replicate from security-enabled leaders
    
    * Remove unnecessary change
    
    * Re-enable TestPullReplica
    
    * Fix bug introduced with refactoring and rename setupCluster method for new test
    
    * Fix a few IDE complaints
    
    * Address review comments
    
    * Mention Torsten Bøgh Köster in CHANGES.txt
---
 solr/CHANGES.txt                                   |   3 +
 .../apache/solr/handler/ReplicationHandler.java    |   3 +
 .../org/apache/solr/cloud/TestPullReplica.java     |  89 ++++++------
 .../apache/solr/cloud/TestPullReplicaWithAuth.java | 156 +++++++++++++++++++++
 4 files changed, 204 insertions(+), 47 deletions(-)

diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index e2ebaec..ea10310 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -381,6 +381,9 @@ Bug Fixes
 
 * SOLR-15383: Solr Zookeeper status page shows green even when some Zookeepers are not serving requests (janhoy)
 
+* SOLR-11904: Mark ReplicationHandler's polling thread as a Solr server thread so the PKI Interceptor is activated to
+  allow PULL replicas to replicate from security-enabled leaders (Timothy Potter, Torsten Bøgh Köster)
+
 Other Changes
 ---------------------
 * SOLR-15118: Deprecate CollectionAdminRequest.getV2Request(). (Jason Gerlowski)
diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
index 414d3c7..4572be1 100644
--- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
@@ -1208,6 +1208,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
         log.info("Poll disabled");
         return;
       }
+      ExecutorUtil.setServerThreadFlag(true); // so PKI auth works
       try {
         log.debug("Polling for index modifications");
         markScheduledExecutionStart();
@@ -1215,6 +1216,8 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
         if (pollListener != null) pollListener.onComplete(core, fetchResult);
       } catch (Exception e) {
         log.error("Exception in fetching index", e);
+      } finally {
+        ExecutorUtil.setServerThreadFlag(null);
       }
     };
     executorService = Executors.newSingleThreadScheduledExecutor(
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
index 5e06c28..1bbe258 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
@@ -28,19 +28,19 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
 import org.apache.http.HttpResponse;
-import org.apache.http.client.ClientProtocolException;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.client.methods.HttpPost;
 import org.apache.http.entity.StringEntity;
-import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
@@ -54,6 +54,7 @@ import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.core.SolrCore;
+import org.apache.solr.util.LogLevel;
 import org.apache.solr.util.TestInjection;
 import org.apache.solr.util.TimeOut;
 import org.apache.zookeeper.KeeperException;
@@ -64,10 +65,8 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.carrotsearch.randomizedtesting.annotations.Repeat;
-
 @Slow
-@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
+@LogLevel("org.apache.solr.handler.ReplicationHandler=DEBUG,org.apache.solr.handler.IndexFetcher=DEBUG")
 public class TestPullReplica extends SolrCloudTestCase {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -80,7 +79,7 @@ public class TestPullReplica extends SolrCloudTestCase {
   }
 
   @BeforeClass
-  public static void setupCluster() throws Exception {
+  public static void createTestCluster() throws Exception {
    //  cloudSolrClientMaxStaleRetries
    System.setProperty("cloudSolrClientMaxStaleRetries", "1");
    System.setProperty("zkReaderGetLeaderRetryTimeoutMs", "1000");
@@ -170,7 +169,7 @@ public class TestPullReplica extends SolrCloudTestCase {
             2, docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
         for (Slice s:docCollection.getSlices()) {
           // read-only replicas can never become leaders
-          assertFalse(s.getLeader().getType() == Replica.Type.PULL);
+          assertNotSame(s.getLeader().getType(), Replica.Type.PULL);
           List<String> shardElectionNodes = cluster.getZkClient().getChildren(ZkStateReader.getShardLeadersElectPath(collectionName, s.getName()), null, true);
           assertEquals("Unexpected election nodes for Shard: " + s.getName() + ": " + Arrays.toString(shardElectionNodes.toArray()),
               1, shardElectionNodes.size());
@@ -194,27 +193,22 @@ public class TestPullReplica extends SolrCloudTestCase {
   /**
    * Asserts that Update logs don't exist for replicas of type {@link org.apache.solr.common.cloud.Replica.Type#PULL}
    */
-  private void assertUlogPresence(DocCollection collection) {
+  static void assertUlogPresence(DocCollection collection) {
     for (Slice s:collection.getSlices()) {
       for (Replica r:s.getReplicas()) {
         if (r.getType() == Replica.Type.NRT) {
           continue;
         }
-        SolrCore core = null;
-        try {
-          core = cluster.getReplicaJetty(r).getCoreContainer().getCore(r.getCoreName());
+        try (SolrCore core = cluster.getReplicaJetty(r).getCoreContainer().getCore(r.getCoreName())) {
           assertNotNull(core);
           assertFalse("Update log should not exist for replicas of type Passive but file is present: " + core.getUlogDir(),
               new java.io.File(core.getUlogDir()).exists());
-        } finally {
-          core.close();
         }
       }
     }
   }
 
   @SuppressWarnings("unchecked")
-  // 12-Jun-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
   public void testAddDocs() throws Exception {
     int numPullReplicas = 1 + random().nextInt(3);
     CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1, 0, numPullReplicas)
@@ -229,37 +223,27 @@ public class TestPullReplica extends SolrCloudTestCase {
       numDocs++;
       cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", String.valueOf(numDocs), "foo", "bar"));
       cluster.getSolrClient().commit(collectionName);
+      log.info("Committed doc {} to leader", numDocs);
 
       Slice s = docCollection.getSlices().iterator().next();
       try (HttpSolrClient leaderClient = getHttpSolrClient(s.getLeader().getCoreUrl())) {
         assertEquals(numDocs, leaderClient.query(new SolrQuery("*:*")).getResults().getNumFound());
       }
+      log.info("Found {} docs in leader, verifying updates make it to {} pull replicas", numDocs, numPullReplicas);
 
-      TimeOut t = new TimeOut(REPLICATION_TIMEOUT_SECS, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-      for (Replica r:s.getReplicas(EnumSet.of(Replica.Type.PULL))) {
-        //TODO: assert replication < REPLICATION_TIMEOUT_SECS
+      List<Replica> pullReplicas = s.getReplicas(EnumSet.of(Replica.Type.PULL));
+      waitForNumDocsInAllReplicas(numDocs, pullReplicas);
+
+      for (Replica r : pullReplicas) {
         try (HttpSolrClient pullReplicaClient = getHttpSolrClient(r.getCoreUrl())) {
-          while (true) {
-            try {
-              assertEquals("Replica " + r.getName() + " not up to date after 10 seconds",
-                  numDocs, pullReplicaClient.query(new SolrQuery("*:*")).getResults().getNumFound());
-              break;
-            } catch (AssertionError e) {
-              if (t.hasTimedOut()) {
-                throw e;
-              } else {
-                Thread.sleep(100);
-              }
-            }
-          }
-          SolrQuery req = new SolrQuery(
-              "qt", "/admin/plugins",
-              "stats", "true");
+          SolrQuery req = new SolrQuery("qt", "/admin/plugins", "stats", "true");
           QueryResponse statsResponse = pullReplicaClient.query(req);
-          assertEquals("Replicas shouldn't process the add document request: " + statsResponse,
-              0L, ((Map<String, Object>)(statsResponse.getResponse()).findRecursive("plugins", "UPDATE", "updateHandler", "stats")).get("UPDATE.updateHandler.adds"));
+          // The adds gauge metric should be null for pull replicas since they don't process adds
+          assertNull("Replicas shouldn't process the add document request: " + statsResponse,
+              ((Map<String, Object>)(statsResponse.getResponse()).findRecursive("plugins", "UPDATE", "updateHandler", "stats")).get("UPDATE.updateHandler.adds"));
         }
       }
+
       if (reloaded) {
         break;
       } else {
@@ -445,7 +429,7 @@ public class TestPullReplica extends SolrCloudTestCase {
     // Also fails if I send the update to the pull replica explicitly
     try (HttpSolrClient pullReplicaClient = getHttpSolrClient(docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).get(0).getCoreUrl())) {
       expectThrows(SolrException.class, () ->
-        cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "2", "foo", "zoo"))
+          pullReplicaClient.add(collectionName, new SolrInputDocument("id", "2", "foo", "zoo"))
       );
     }
     if (removeReplica) {
@@ -486,7 +470,7 @@ public class TestPullReplica extends SolrCloudTestCase {
       leaderClient.commit();
       assertEquals(1, leaderClient.query(new SolrQuery("*:*")).getResults().getNumFound());
     }
-    waitForNumDocsInAllReplicas(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)), "id:2");
+    waitForNumDocsInAllReplicas(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)), "id:2", null, null);
     waitForNumDocsInAllReplicas(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)));
   }
 
@@ -528,23 +512,30 @@ public class TestPullReplica extends SolrCloudTestCase {
   }
 
   private void waitForNumDocsInAllReplicas(int numDocs, Collection<Replica> replicas) throws IOException, SolrServerException, InterruptedException {
-    waitForNumDocsInAllReplicas(numDocs, replicas, "*:*");
+    waitForNumDocsInAllReplicas(numDocs, replicas, "*:*", null, null);
   }
 
-  private void waitForNumDocsInAllReplicas(int numDocs, Collection<Replica> replicas, String query) throws IOException, SolrServerException, InterruptedException {
+  static void waitForNumDocsInAllReplicas(int numDocs, Collection<Replica> replicas, String query, String user, String pass) throws IOException, SolrServerException, InterruptedException {
     TimeOut t = new TimeOut(REPLICATION_TIMEOUT_SECS, TimeUnit.SECONDS, TimeSource.NANO_TIME);
     for (Replica r:replicas) {
-      try (HttpSolrClient replicaClient = getHttpSolrClient(r.getCoreUrl())) {
+      String replicaUrl = r.getCoreUrl();
+      try (HttpSolrClient replicaClient = getHttpSolrClient(replicaUrl)) {
         while (true) {
+          QueryRequest req = new QueryRequest(new SolrQuery(query));
+          if (user != null && pass != null) {
+            req.setBasicAuthCredentials(user, pass);
+          }
           try {
-            assertEquals("Replica " + r.getName() + " not up to date after " + REPLICATION_TIMEOUT_SECS + " seconds",
-                numDocs, replicaClient.query(new SolrQuery(query)).getResults().getNumFound());
+            long numFound = req.process(replicaClient).getResults().getNumFound();
+            assertEquals("Replica " + r.getName() + " (" + replicaUrl + ") not up to date after " + REPLICATION_TIMEOUT_SECS + " seconds",
+                numDocs, numFound);
+            log.info("Replica {} ({}) has all {} docs", r.name, replicaUrl, numDocs);
             break;
           } catch (AssertionError e) {
             if (t.hasTimedOut()) {
               throw e;
             } else {
-              Thread.sleep(100);
+              Thread.sleep(200);
             }
           }
         }
@@ -552,7 +543,7 @@ public class TestPullReplica extends SolrCloudTestCase {
     }
   }
 
-  private void waitForDeletion(String collection) throws InterruptedException, KeeperException {
+  static void waitForDeletion(String collection) throws InterruptedException, KeeperException {
     TimeOut t = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
     while (cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
       log.info("Collection not yet deleted");
@@ -570,10 +561,14 @@ public class TestPullReplica extends SolrCloudTestCase {
   }
 
   private DocCollection assertNumberOfReplicas(int numNrtReplicas, int numTlogReplicas, int numPullReplicas, boolean updateCollection, boolean activeOnly) throws KeeperException, InterruptedException {
+    return assertNumberOfReplicas(collectionName, numNrtReplicas, numTlogReplicas, numPullReplicas, updateCollection, activeOnly);
+  }
+
+  static DocCollection assertNumberOfReplicas(String coll, int numNrtReplicas, int numTlogReplicas, int numPullReplicas, boolean updateCollection, boolean activeOnly) throws KeeperException, InterruptedException {
     if (updateCollection) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
+      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(coll);
     }
-    DocCollection docCollection = getCollectionState(collectionName);
+    DocCollection docCollection = getCollectionState(coll);
     assertNotNull(docCollection);
     assertEquals("Unexpected number of writer replicas: " + docCollection, numNrtReplicas,
         docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).stream().filter(r->!activeOnly || r.getState() == Replica.State.ACTIVE).count());
@@ -641,7 +636,7 @@ public class TestPullReplica extends SolrCloudTestCase {
     cluster.getSolrClient().commit(collectionName);
   }
 
-  private void addReplicaToShard(String shardName, Replica.Type type) throws ClientProtocolException, IOException, SolrServerException {
+  private void addReplicaToShard(String shardName, Replica.Type type) throws IOException, SolrServerException {
     switch (random().nextInt(3)) {
       case 0: // Add replica with SolrJ
         CollectionAdminResponse response = CollectionAdminRequest.addReplicaToShard(collectionName, shardName, type).process(cluster.getSolrClient());
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaWithAuth.java b/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaWithAuth.java
new file mode 100644
index 0000000..b5cb370
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaWithAuth.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrResponse;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.security.BasicAuthPlugin;
+import org.apache.solr.security.RuleBasedAuthorizationPlugin;
+import org.junit.BeforeClass;
+
+import static java.util.Collections.singletonList;
+import static java.util.Collections.singletonMap;
+import static org.apache.solr.cloud.TestPullReplica.assertNumberOfReplicas;
+import static org.apache.solr.cloud.TestPullReplica.assertUlogPresence;
+import static org.apache.solr.cloud.TestPullReplica.waitForDeletion;
+import static org.apache.solr.cloud.TestPullReplica.waitForNumDocsInAllReplicas;
+import static org.apache.solr.security.Sha256AuthenticationProvider.getSaltedHashedValue;
+
+@Slow
+public class TestPullReplicaWithAuth extends SolrCloudTestCase {
+
+  private static final String USER = "solr";
+  private static final String PASS = "SolrRocksAgain";
+  private static final String collectionName = "testPullReplicaWithAuth";
+
+  @BeforeClass
+  public static void setupClusterWithSecurityEnabled() throws Exception {
+    final String SECURITY_JSON = Utils.toJSONString
+        (Map.of("authorization",
+            Map.of("class", RuleBasedAuthorizationPlugin.class.getName(),
+                "user-role", singletonMap(USER, "admin"),
+                "permissions", singletonList(Map.of("name", "all", "role", "admin"))),
+            "authentication",
+            Map.of("class", BasicAuthPlugin.class.getName(),
+                "blockUnknown", true,
+                "credentials", singletonMap(USER, getSaltedHashedValue(PASS)))));
+
+    configureCluster(2)
+        .addConfig("conf", configset("cloud-minimal"))
+        .withSecurityJson(SECURITY_JSON)
+        .configure();
+  }
+
+  private <T extends SolrRequest<? extends SolrResponse>> T withBasicAuth(T req) {
+    req.setBasicAuthCredentials(USER, PASS);
+    return req;
+  }
+
+  private QueryResponse queryWithBasicAuth(HttpSolrClient client, SolrQuery q) throws IOException, SolrServerException {
+    return withBasicAuth(new QueryRequest(q)).process(client);
+  }
+
+  public void testPKIAuthWorksForPullReplication() throws Exception {
+    int numPullReplicas = 2;
+    withBasicAuth(CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1, 0, numPullReplicas))
+        .process(cluster.getSolrClient());
+    waitForState("Expected collection to be created with 1 shard and " + (numPullReplicas + 1) + " replicas",
+        collectionName, clusterShape(1, numPullReplicas + 1));
+    DocCollection docCollection =
+        assertNumberOfReplicas(collectionName, 1, 0, numPullReplicas, false, true);
+
+    int numDocs = 0;
+    CloudSolrClient solrClient = cluster.getSolrClient();
+    for (int i = 0; i < 5; i++) {
+      numDocs++;
+
+      UpdateRequest ureq = withBasicAuth(new UpdateRequest());
+      ureq.add(new SolrInputDocument("id", String.valueOf(numDocs), "foo", "bar"));
+      ureq.commit(solrClient, collectionName);
+
+      Slice s = docCollection.getSlices().iterator().next();
+      try (HttpSolrClient leaderClient = getHttpSolrClient(s.getLeader().getCoreUrl())) {
+        assertEquals(numDocs, queryWithBasicAuth(leaderClient, new SolrQuery("*:*")).getResults().getNumFound());
+      }
+
+      List<Replica> pullReplicas = s.getReplicas(EnumSet.of(Replica.Type.PULL));
+      waitForNumDocsInAllReplicas(numDocs, pullReplicas, "*:*", USER, PASS);
+
+      for (Replica r : pullReplicas) {
+        try (HttpSolrClient pullReplicaClient = getHttpSolrClient(r.getCoreUrl())) {
+          QueryResponse statsResponse = queryWithBasicAuth(pullReplicaClient, new SolrQuery("qt", "/admin/plugins", "stats", "true"));
+          // adds is a gauge, which is null for PULL replicas
+          assertNull("Replicas shouldn't process the add document request: " + statsResponse,
+              getUpdateHandlerMetric(statsResponse, "UPDATE.updateHandler.adds"));
+          assertEquals("Replicas shouldn't process the add document request: " + statsResponse,
+              0L, getUpdateHandlerMetric(statsResponse, "UPDATE.updateHandler.cumulativeAdds.count"));
+        }
+      }
+    }
+
+    CollectionAdminResponse response =
+        withBasicAuth(CollectionAdminRequest.reloadCollection(collectionName)).process(cluster.getSolrClient());
+    assertEquals(0, response.getStatus());
+    assertUlogPresence(docCollection);
+
+    // add another pull replica to ensure it can pull the indexes
+    Slice s = docCollection.getSlices().iterator().next();
+    List<Replica> pullReplicas = s.getReplicas(EnumSet.of(Replica.Type.PULL));
+    assertEquals(numPullReplicas, pullReplicas.size());
+    response = withBasicAuth(CollectionAdminRequest.addReplicaToShard(collectionName, s.getName(), Replica.Type.PULL)).process(cluster.getSolrClient());
+    assertEquals(0, response.getStatus());
+
+    numPullReplicas = numPullReplicas + 1; // added a PULL
+    waitForState("Expected collection to be created with 1 shard and " + (numPullReplicas + 1) + " replicas",
+        collectionName, clusterShape(1, numPullReplicas + 1));
+
+    docCollection =
+        assertNumberOfReplicas(collectionName, 1, 0, numPullReplicas, false, true);
+    s = docCollection.getSlices().iterator().next();
+    pullReplicas = s.getReplicas(EnumSet.of(Replica.Type.PULL));
+    assertEquals(numPullReplicas, pullReplicas.size());
+    waitForNumDocsInAllReplicas(numDocs, pullReplicas, "*:*", USER, PASS);
+
+    withBasicAuth(CollectionAdminRequest.deleteCollection(collectionName)).process(cluster.getSolrClient());
+    waitForDeletion(collectionName);
+  }
+
+  @SuppressWarnings("unchecked")
+  private Object getUpdateHandlerMetric(QueryResponse statsResponse, String metric) {
+    return ((Map<String, Object>) statsResponse.getResponse().findRecursive("plugins", "UPDATE", "updateHandler", "stats")).get(metric);
+  }
+}
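
The essence of the change is the small ReplicationHandler hunk above: the polling task now sets the server-thread flag for its duration, because (as the commit title states) the PKI interceptor is only activated for requests made from a thread marked as a Solr server thread, and the replication poll runs on a plain scheduled-executor thread with no request context. For readers who want to reuse that pattern, below is a minimal, hedged sketch; the class name ServerThreadFlagSketch and the asServerThreadTask helper are illustrative only (not part of this commit), and the only Solr API it relies on is ExecutorUtil.setServerThreadFlag, which the diff itself uses.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    import org.apache.solr.common.util.ExecutorUtil;

    public class ServerThreadFlagSketch {

      // Wraps a task so the executing thread is flagged as a Solr server thread for the
      // task's duration. Per the commit message, the PKI interceptor only kicks in for
      // internode requests issued from such threads, so any background job that talks to
      // a security-enabled node needs this try/finally around its body.
      static Runnable asServerThreadTask(Runnable task) {
        return () -> {
          ExecutorUtil.setServerThreadFlag(true); // so PKI auth works (same call as in the diff)
          try {
            task.run();
          } finally {
            ExecutorUtil.setServerThreadFlag(null); // always clear the thread-local flag
          }
        };
      }

      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
        // Schedule a periodic "poll", loosely mirroring how ReplicationHandler schedules
        // its index fetcher; the real handler's fetch logic would go inside the task.
        executorService.scheduleWithFixedDelay(
            asServerThreadTask(() -> System.out.println("poll the leader for index changes here")),
            0, 30, TimeUnit.SECONDS);
        Thread.sleep(1000);
        executorService.shutdownNow();
      }
    }

The new TestPullReplicaWithAuth test exercises exactly this path end to end: with BasicAuthPlugin blocking unknown users, PULL replicas can only stay in sync if their polling requests carry the PKI header added by the flagged thread.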