Posted to commits@lucene.apache.org by no...@apache.org on 2016/08/31 15:38:12 UTC

lucene-solr:branch_5x: SOLR-9310: PeerSync fails on a node restart due to IndexFingerPrint mismatch

Repository: lucene-solr
Updated Branches:
  refs/heads/branch_5x bcaf4999d -> 4f6e25467


SOLR-9310: PeerSync fails on a node restart due to IndexFingerPrint mismatch


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4f6e2546
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4f6e2546
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4f6e2546

Branch: refs/heads/branch_5x
Commit: 4f6e2546739e5352738f786aaddfb6f08b1549aa
Parents: bcaf499
Author: Noble Paul <no...@apache.org>
Authored: Tue Aug 23 22:26:11 2016 +0530
Committer: Noble Paul <no...@apache.org>
Committed: Wed Aug 31 21:07:59 2016 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   2 +
 .../handler/component/RealTimeGetComponent.java |  23 +-
 .../java/org/apache/solr/update/PeerSync.java   |  29 +-
 .../java/org/apache/solr/update/UpdateLog.java  |   7 +-
 .../processor/DistributedUpdateProcessor.java   |  30 +-
 .../solr/cloud/PeerSyncReplicationTest.java     | 363 +++++++++++++++++++
 6 files changed, 429 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f6e2546/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 7620a7b..7a9efa2 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -49,6 +49,8 @@ Bug Fixes
 * SOLR-7280: Load cores in sorted order & limit threads to improve cluster stability (noble, Erick Erickson, shalin)
 
 
+* SOLR-9310: PeerSync fails on a node restart due to IndexFingerPrint mismatch (Pushkar Raste, noble)
+
 ======================= 5.5.2 =======================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f6e2546/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java b/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
index b30c786..dc237de 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
@@ -20,6 +20,8 @@ import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.net.URL;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -557,14 +559,15 @@ public class RealTimeGetComponent extends SearchComponent
     UpdateLog ulog = req.getCore().getUpdateHandler().getUpdateLog();
     if (ulog == null) return;
 
-    try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
-      rb.rsp.add("versions", recentUpdates.getVersions(nVersions));
-    }
-
     if (doFingerprint) {
       IndexFingerprint fingerprint = IndexFingerprint.getFingerprint(req.getCore(), Long.MAX_VALUE);
       rb.rsp.add("fingerprint", fingerprint.toObject());
     }
+
+    try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
+      List<Long> versions = recentUpdates.getVersions(nVersions);
+      rb.rsp.add("versions", versions);
+    }
   }
 
   
@@ -610,6 +613,18 @@ public class RealTimeGetComponent extends SearchComponent
 
     List<String> versions = StrUtils.splitSmart(versionsStr, ",", true);
 
+    // find the fingerprint for the max version for which updates are requested
+    boolean doFingerprint = params.getBool("fingerprint", false);
+    if (doFingerprint) {
+      String maxVersionForUpdate = Collections.min(versions, new Comparator<String>() {
+        @Override
+        public int compare(String s1, String s2) {
+          return PeerSync.absComparator.compare(Long.parseLong(s1), Long.parseLong(s2));
+        }
+      });
+      IndexFingerprint fingerprint = IndexFingerprint.getFingerprint(req.getCore(), Math.abs(Long.parseLong(maxVersionForUpdate)));
+      rb.rsp.add("fingerprint", fingerprint.toObject());
+    }
 
     List<Object> updates = new ArrayList<>(versions.size());
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f6e2546/solr/core/src/java/org/apache/solr/update/PeerSync.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/PeerSync.java b/solr/core/src/java/org/apache/solr/update/PeerSync.java
index dbc0091..c2fffd6 100644
--- a/solr/core/src/java/org/apache/solr/update/PeerSync.java
+++ b/solr/core/src/java/org/apache/solr/update/PeerSync.java
@@ -86,7 +86,7 @@ public class PeerSync  {
   private SolrCore core;
 
   // comparator that sorts by absolute value, putting highest first
-  private static Comparator<Long> absComparator = new Comparator<Long>() {
+  public static Comparator<Long> absComparator = new Comparator<Long>() {
     @Override
     public int compare(Long o1, Long o2) {
       long l1 = Math.abs(o1);
@@ -140,7 +140,7 @@ public class PeerSync  {
     this.maxUpdates = nUpdates;
     this.cantReachIsSuccess = cantReachIsSuccess;
     this.getNoVersionsIsSuccess = getNoVersionsIsSuccess;
-    this.doFingerprint = doFingerprint;
+    this.doFingerprint = doFingerprint && !("true".equals(System.getProperty("solr.disableFingerprint")));
     this.client = core.getCoreDescriptor().getCoreContainer().getUpdateShardHandler().getHttpClient();
     this.onlyIfActive = onlyIfActive;
     
@@ -458,9 +458,14 @@ public class PeerSync  {
   private boolean compareFingerprint(SyncShardRequest sreq) {
     if (sreq.fingerprint == null) return true;
     try {
-      IndexFingerprint ourFingerprint = IndexFingerprint.getFingerprint(core, Long.MAX_VALUE);
-      int cmp = IndexFingerprint.compare(ourFingerprint, sreq.fingerprint);
-      log.info("Fingerprint comparison: " + cmp);
+      // check our fingerprint only up to the max version in the other fingerprint.
+      // Otherwise, for missed updates (see the missed-update test in PeerSyncTest), ourFingerprint won't match otherFingerprint
+      IndexFingerprint ourFingerprint = IndexFingerprint.getFingerprint(core, sreq.fingerprint.getMaxVersionSpecified());
+      int cmp = IndexFingerprint.compare(sreq.fingerprint, ourFingerprint);
+      log.info("Fingerprint comparison: {}" , cmp);
+      if(cmp != 0) {
+        log.info("Other fingerprint: {}, Our fingerprint: {}", sreq.fingerprint , ourFingerprint);
+      }
       return cmp == 0;  // currently, we only check for equality...
     } catch(IOException e){
       log.error(msg() + "Error getting index fingerprint", e);
@@ -482,6 +487,12 @@ public class PeerSync  {
     sreq.params.set("distrib", false);
     sreq.params.set("getUpdates", StrUtils.join(toRequest, ','));
     sreq.params.set("onlyIfActive", onlyIfActive);
+
+    // The fingerprint should be requested only for the max version we are requesting updates for.
+    // If updates are coming in while the node is coming back up after a restart, it will already have
+    // buffered some of them; the fingerprint we requested along with the versions would reflect those
+    // buffered versions as well and would definitely cause a mismatch
+    sreq.params.set("fingerprint",doFingerprint);
     sreq.responses.clear();  // needs to be zeroed for correct correlation to occur
 
     shardHandler.submit(sreq, sreq.shards[0], sreq.params);
@@ -500,6 +511,14 @@ public class PeerSync  {
       return false;
     }
 
+    // overwrite fingerprint we saved in 'handleVersions()'
+    Object fingerprint = srsp.getSolrResponse().getResponse().get("fingerprint");
+
+    if (fingerprint != null) {
+      sreq.fingerprint = IndexFingerprint.fromObject(fingerprint);
+    }
+
+
     ModifiableSolrParams params = new ModifiableSolrParams();
     params.set(DISTRIB_UPDATE_PARAM, FROMLEADER.toString());
     params.set("peersync",true); // debugging
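
The compareFingerprint change above computes our fingerprint only up to the max version carried in the other node's fingerprint before comparing, so updates we buffered (or missed) above that point cannot cause a spurious mismatch. A rough, self-contained illustration of that idea follows; the Fingerprint class and its hash are hypothetical stand-ins, not the real IndexFingerprint API:

  public class FingerprintCheckSketch {

    // Hypothetical stand-in for IndexFingerprint: the cutoff version plus a summary of versions below it.
    static class Fingerprint {
      final long maxVersionSpecified;
      final long versionsHash;
      Fingerprint(long maxVersionSpecified, long versionsHash) {
        this.maxVersionSpecified = maxVersionSpecified;
        this.versionsHash = versionsHash;
      }
    }

    // Build a fingerprint from only the versions whose absolute value is <= maxVersion.
    // The order-independent sum keeps the summary stable regardless of iteration order.
    static Fingerprint fingerprintUpTo(long[] versions, long maxVersion) {
      long hash = 0;
      for (long v : versions) {
        if (Math.abs(v) <= Math.abs(maxVersion)) hash += v;
      }
      return new Fingerprint(maxVersion, hash);
    }

    // Mirrors the patched compareFingerprint: limit our fingerprint to the other side's cutoff.
    static boolean matches(Fingerprint other, long[] ourVersions) {
      Fingerprint ours = fingerprintUpTo(ourVersions, other.maxVersionSpecified);
      return ours.versionsHash == other.versionsHash;
    }
  }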

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f6e2546/solr/core/src/java/org/apache/solr/update/UpdateLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index 2456c3e..57ee239 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -951,11 +951,16 @@ public class UpdateLog implements PluginInfoInitialized {
       }
     }
 
-    public List<Long> getVersions(int n) {
+    public List<Long> getVersions(int n) {
+      return getVersions(n, Long.MAX_VALUE);
+    }
+
+    public List<Long> getVersions(int n, long maxVersion) {
       List<Long> ret = new ArrayList<>(n);
 
       for (List<Update> singleList : updateList) {
         for (Update ptr : singleList) {
+          if(Math.abs(ptr.version) > Math.abs(maxVersion)) continue;
           ret.add(ptr.version);
           if (--n <= 0) return ret;
         }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f6e2546/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
index 4af8277..14c1286 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@@ -1022,7 +1022,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
 
             // leaders can also be in buffering state during "migrate" API call, see SOLR-5308
             if (forwardedFromCollection && ulog.getState() != UpdateLog.State.ACTIVE
-                && (cmd.getFlags() & UpdateCommand.REPLAY) == 0) {
+                && isReplayOrPeersync == false) {
               // we're not in an active state, and this update isn't from a replay, so buffer it.
               log.info("Leader logic applied but update log is buffering: " + cmd.getPrintableId());
               cmd.setFlags(cmd.getFlags() | UpdateCommand.BUFFERING);
@@ -1050,7 +1050,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
             // The leader forwarded us this update.
             cmd.setVersion(versionOnUpdate);
 
-            if (ulog.getState() != UpdateLog.State.ACTIVE && (cmd.getFlags() & UpdateCommand.REPLAY) == 0) {
+            if (ulog.getState() != UpdateLog.State.ACTIVE && isReplayOrPeersync == false) {
               // we're not in an active state, and this update isn't from a replay, so buffer it.
               cmd.setFlags(cmd.getFlags() | UpdateCommand.BUFFERING);
               ulog.add(cmd);
@@ -1077,9 +1077,9 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
             }
           }
         }
-        
+
         boolean willDistrib = isLeader && nodes != null && nodes.size() > 0;
-        
+
         SolrInputDocument clonedDoc = null;
         if (willDistrib) {
           clonedDoc = cmd.solrDoc.deepCopy();
@@ -1087,7 +1087,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
 
         // TODO: possibly set checkDeleteByQueries as a flag on the command?
         doLocalAdd(cmd);
-        
+
         if (willDistrib) {
           cmd.solrDoc = clonedDoc;
         }
@@ -1119,7 +1119,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
     } else {
       oldDoc.remove(VERSION_FIELD);
     }
-    
+
 
     cmd.solrDoc = docMerger.merge(sdoc, oldDoc);
     return true;
@@ -1127,9 +1127,9 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
 
   @Override
   public void processDelete(DeleteUpdateCommand cmd) throws IOException {
-    
+
     assert TestInjection.injectFailUpdateRequests();
-    
+
     updateCommand = cmd;
 
     if (!cmd.isDeleteById()) {
@@ -1143,12 +1143,12 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
     } else {
       isLeader = getNonZkLeaderAssumption(req);
     }
-    
+
     boolean dropCmd = false;
     if (!forwardToLeader) {
       dropCmd  = versionDelete(cmd);
     }
-    
+
     if (dropCmd) {
       // TODO: do we need to add anything to the response?
       return;
@@ -1241,10 +1241,10 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
     //       - log + execute the local DBQ
     // FROM: we are a replica receiving a DBQ from our leader
     //       - log + execute the local DBQ
-    DistribPhase phase = 
+    DistribPhase phase =
     DistribPhase.parseParam(req.getParams().get(DISTRIB_UPDATE_PARAM));
 
-    DocCollection coll = zkEnabled 
+    DocCollection coll = zkEnabled
       ? zkController.getClusterState().getCollection(collection) : null;
 
     if (zkEnabled && DistribPhase.NONE == phase) {
@@ -1468,7 +1468,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
     if (!zkController.getZkClient().getConnectionManager().isLikelyExpired()) {
       return;
     }
-    
+
     throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Cannot talk to ZooKeeper - Updates are disabled.");
   }
 
@@ -1524,7 +1524,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
 
             // leaders can also be in buffering state during "migrate" API call, see SOLR-5308
             if (forwardedFromCollection && ulog.getState() != UpdateLog.State.ACTIVE
-                && (cmd.getFlags() & UpdateCommand.REPLAY) == 0) {
+                && !isReplayOrPeersync) {
               // we're not in an active state, and this update isn't from a replay, so buffer it.
               log.info("Leader logic applied but update log is buffering: " + cmd.getId());
               cmd.setFlags(cmd.getFlags() | UpdateCommand.BUFFERING);
@@ -1549,7 +1549,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
           } else {
             cmd.setVersion(-versionOnUpdate);
 
-            if (ulog.getState() != UpdateLog.State.ACTIVE && (cmd.getFlags() & UpdateCommand.REPLAY) == 0) {
+            if (ulog.getState() != UpdateLog.State.ACTIVE && isReplayOrPeersync == false) {
               // we're not in an active state, and this update isn't from a replay, so buffer it.
               cmd.setFlags(cmd.getFlags() | UpdateCommand.BUFFERING);
               ulog.delete(cmd);
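
The DistributedUpdateProcessor hunks above broaden the buffering guard: instead of exempting only updates carrying the REPLAY flag, they use the precomputed isReplayOrPeersync flag, so updates applied during PeerSync are also written through rather than buffered while the update log is not ACTIVE. A hedged sketch of the before/after condition; the bit values are illustrative, not Solr's actual constants:

  public class BufferingGuardSketch {
    static final int REPLAY = 0x01;    // illustrative flag bits
    static final int PEER_SYNC = 0x02;

    // Before the patch: only a replayed update bypassed buffering when the update log was inactive.
    static boolean shouldBufferBefore(boolean ulogActive, int flags) {
      return !ulogActive && (flags & REPLAY) == 0;
    }

    // After the patch: updates from replay or peersync both bypass buffering.
    static boolean shouldBufferAfter(boolean ulogActive, int flags) {
      boolean isReplayOrPeersync = (flags & (REPLAY | PEER_SYNC)) != 0;
      return !ulogActive && !isReplayOrPeersync;
    }
  }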

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f6e2546/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java b/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java
new file mode 100644
index 0000000..458a283
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java
@@ -0,0 +1,363 @@
+package org.apache.solr.cloud;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.cloud.ZkTestServer.LimitViolationAction;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.handler.ReplicationHandler;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Test PeerSync when a node restarts and documents are indexed while the node was down.
+ *
+ * This test is modeled after SyncSliceTest
+ */
+@Slow
+public class PeerSyncReplicationTest extends AbstractFullDistribZkTestBase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private boolean success = false;
+  int docId = 0;
+
+  List<CloudJettyRunner> nodesDown = new ArrayList<>();
+
+  @Override
+  public void distribTearDown() throws Exception {
+    if (!success) {
+      printLayoutOnTearDown = true;
+    }
+    System.clearProperty("solr.directoryFactory");
+    System.clearProperty("solr.ulog.numRecordsToKeep");
+    System.clearProperty("tests.zk.violationReportAction");
+    super.distribTearDown();
+  }
+
+  public PeerSyncReplicationTest() {
+    super();
+    sliceCount = 1;
+    fixShardCount(3);
+  }
+
+  protected String getCloudSolrConfig() {
+    return "solrconfig-tlog.xml";
+  }
+
+  @Override
+  public void distribSetUp() throws Exception {
+    // tlog gets deleted after node restarts if we use CachingDirectoryFactory.
+    // make sure that tlog stays intact after we restart a node
+    System.setProperty("solr.directoryFactory", "solr.StandardDirectoryFactory");
+    System.setProperty("solr.ulog.numRecordsToKeep", "1000");
+    System.setProperty("tests.zk.violationReportAction", LimitViolationAction.IGNORE.toString());
+    super.distribSetUp();
+  }
+
+  @Test
+  public void test() throws Exception {
+    handle.clear();
+    handle.put("timestamp", SKIPVAL);
+
+    waitForThingsToLevelOut(30);
+
+    del("*:*");
+
+    // index enough docs and commit to establish frame of reference for PeerSync
+    for (int i = 0; i < 100; i++) {
+      indexDoc(id, docId, i1, 50, tlong, 50, t1,
+          "document number " + docId++);
+    }
+    commit();
+    waitForThingsToLevelOut(30);
+
+    try {
+      checkShardConsistency(false, true);
+
+      long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+      assertEquals(docId, cloudClientDocs);
+
+      CloudJettyRunner initialLeaderJetty = shardToLeaderJetty.get("shard1");
+      List<CloudJettyRunner> otherJetties = getOtherAvailableJetties(initialLeaderJetty);
+      CloudJettyRunner neverLeader = otherJetties.get(otherJetties.size() - 1);
+      otherJetties.remove(neverLeader);
+
+      // first shutdown a node that will never be a leader
+      forceNodeFailures(Arrays.asList(neverLeader));
+
+      // node failure and recovery via PeerSync
+      log.info("Forcing PeerSync");
+      CloudJettyRunner nodePeerSynced = forceNodeFailureAndDoPeerSync(false);
+
+      // add a few more docs
+      indexDoc(id, docId, i1, 50, tlong, 50, t1,
+          "document number " + docId++);
+      indexDoc(id, docId, i1, 50, tlong, 50, t1,
+          "document number " + docId++);
+      commit();
+
+      cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+      assertEquals(docId, cloudClientDocs);
+
+      // now shutdown all other nodes except for 'nodeShutDownForFailure'
+      otherJetties.remove(nodePeerSynced);
+      forceNodeFailures(otherJetties);
+      waitForThingsToLevelOut(30);
+      checkShardConsistency(false, true);
+
+      // now shutdown the original leader
+      log.info("Now shutting down initial leader");
+      forceNodeFailures(Arrays.asList(initialLeaderJetty));
+      log.info("Updating mappings from zk");
+      Thread.sleep(15000); // sleep for a while for leader to change ...
+      updateMappingsFromZk(jettys, clients, true);
+      assertEquals("PeerSynced node did not become leader", nodePeerSynced, shardToLeaderJetty.get("shard1"));
+
+      // bring up the node that was down all along, and let it PeerSync from the node that was forced to PeerSync
+      bringUpDeadNodeAndEnsureNoReplication(shardToLeaderJetty.get("shard1"), neverLeader, false);
+      waitTillNodesActive();
+
+      checkShardConsistency(false, true);
+
+
+      // bring back all the nodes including initial leader 
+      // (commented as reports Maximum concurrent create/delete watches above limit violation and reports thread leaks)
+      /*for(int i = 0 ; i < nodesDown.size(); i++) {
+        bringUpDeadNodeAndEnsureNoReplication(shardToLeaderJetty.get("shard1"), neverLeader, false);
+      }
+      checkShardConsistency(false, true);*/
+
+      // make sure leader has not changed after bringing initial leader back
+      assertEquals(nodePeerSynced, shardToLeaderJetty.get("shard1"));
+      success = true;
+    } finally {
+      System.clearProperty("solr.disableFingerprint");
+    }
+  }
+
+
+  private void indexInBackground(final int numDocs) {
+    new Thread(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          for (int i = 0; i < numDocs; i++) {
+            PeerSyncReplicationTest.this.indexDoc(id, docId, i1, 50, tlong, 50, t1, "document number " + docId);
+            docId++;
+            // slow down adds, to get documents indexed while in PeerSync
+            Thread.sleep(100);
+          }
+        } catch (Exception e) {
+          log.error("Error indexing doc in background", e);
+          //Throwing an error here will kill the thread
+        }
+      }
+    }, getClassName())
+        .start();
+
+
+  }
+
+
+  private void forceNodeFailures(List<CloudJettyRunner> replicasToShutDown) throws Exception {
+    for (CloudJettyRunner replicaToShutDown : replicasToShutDown) {
+      chaosMonkey.killJetty(replicaToShutDown);
+      waitForNoShardInconsistency();
+    }
+
+    int totalDown = 0;
+
+    Set<CloudJettyRunner> jetties = new HashSet<>();
+    jetties.addAll(shardToJetty.get("shard1"));
+
+    if (replicasToShutDown != null) {
+      jetties.removeAll(replicasToShutDown);
+      totalDown += replicasToShutDown.size();
+    }
+
+    jetties.removeAll(nodesDown);
+    totalDown += nodesDown.size();
+
+    assertEquals(getShardCount() - totalDown, jetties.size());
+
+    nodesDown.addAll(replicasToShutDown);
+
+    Thread.sleep(3000);
+  }
+
+
+
+  private CloudJettyRunner forceNodeFailureAndDoPeerSync(boolean disableFingerprint)
+      throws Exception {
+    // kill non leader - new leader could have all the docs or be missing one
+    CloudJettyRunner leaderJetty = shardToLeaderJetty.get("shard1");
+
+    List<CloudJettyRunner> nonLeaderJetties = getOtherAvailableJetties(leaderJetty);
+    CloudJettyRunner replicaToShutDown = nonLeaderJetties.get(random().nextInt(nonLeaderJetties.size())); // random non leader node
+
+    forceNodeFailures(Arrays.asList(replicaToShutDown));
+
+    // two docs need to be sync'd back when replica restarts
+    indexDoc(id, docId, i1, 50, tlong, 50, t1,
+        "document number " + docId++);
+    indexDoc(id, docId, i1, 50, tlong, 50, t1,
+        "document number " + docId++);
+    commit();
+
+    bringUpDeadNodeAndEnsureNoReplication(leaderJetty, replicaToShutDown, disableFingerprint);
+
+    return replicaToShutDown;
+  }
+
+
+
+  private void bringUpDeadNodeAndEnsureNoReplication(CloudJettyRunner leaderJetty, CloudJettyRunner nodeToBringUp,
+                                                     boolean disableFingerprint) throws Exception {
+    // disable fingerprint check if needed
+    System.setProperty("solr.disableFingerprint", String.valueOf(disableFingerprint));
+
+    long numRequestsBefore = (Long) leaderJetty.jetty
+        .getCoreContainer()
+        .getCores()
+        .iterator()
+        .next()
+        .getRequestHandler(ReplicationHandler.PATH)
+        .getStatistics().get("requests");
+
+    indexInBackground(50);
+
+    // bring back dead node and ensure it recovers
+    ChaosMonkey.start(nodeToBringUp.jetty);
+
+    nodesDown.remove(nodeToBringUp);
+
+    waitTillNodesActive();
+    waitForThingsToLevelOut(30);
+
+    Set<CloudJettyRunner> jetties = new HashSet<>();
+    jetties.addAll(shardToJetty.get("shard1"));
+    jetties.removeAll(nodesDown);
+    assertEquals(getShardCount() - nodesDown.size(), jetties.size());
+
+    long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+    assertEquals(docId, cloudClientDocs);
+
+    long numRequestsAfter = (Long) leaderJetty.jetty
+        .getCoreContainer()
+        .getCores()
+        .iterator()
+        .next()
+        .getRequestHandler(ReplicationHandler.PATH)
+        .getStatistics().get("requests");
+
+    assertEquals("PeerSync failed. Had to fail back to replication", numRequestsBefore, numRequestsAfter);
+  }
+
+
+
+  private void waitTillNodesActive() throws Exception {
+    for (int i = 0; i < 60; i++) {
+      Thread.sleep(3000);
+      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      ClusterState clusterState = zkStateReader.getClusterState();
+      DocCollection collection1 = clusterState.getCollection("collection1");
+      Slice slice = collection1.getSlice("shard1");
+      Collection<Replica> replicas = slice.getReplicas();
+      boolean allActive = true;
+
+      Collection<Replica> replicasToCheck = new ArrayList<>();
+      for (Replica r : replicas) {
+        if(nodesDown.contains(r.getName())) replicasToCheck.add(r);
+      }
+
+      for (Replica replica : replicasToCheck) {
+        if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
+          allActive = false;
+          break;
+        }
+      }
+      if (allActive) {
+        return;
+      }
+    }
+    printLayout();
+    fail("timeout waiting to see all nodes active");
+  }
+
+
+
+  private List<CloudJettyRunner> getOtherAvailableJetties(CloudJettyRunner leader) {
+    List<CloudJettyRunner> candidates = new ArrayList<>();
+    candidates.addAll(shardToJetty.get("shard1"));
+
+    if (leader != null) {
+      candidates.remove(leader);
+    }
+
+    candidates.removeAll(nodesDown);
+
+    return candidates;
+  }
+
+
+
+  protected void indexDoc(Object... fields) throws IOException,
+      SolrServerException {
+    SolrInputDocument doc = new SolrInputDocument();
+
+    addFields(doc, fields);
+    addFields(doc, "rnd_s", RandomStringUtils.random(random().nextInt(100) + 100));
+
+    UpdateRequest ureq = new UpdateRequest();
+    ureq.add(doc);
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    ureq.setParams(params);
+    ureq.process(cloudClient);
+  }
+
+  // skip the randoms - they can deadlock...
+  @Override
+  protected void indexr(Object... fields) throws Exception {
+    SolrInputDocument doc = new SolrInputDocument();
+    addFields(doc, fields);
+    addFields(doc, "rnd_b", true);
+    indexDoc(doc);
+  }
+
+}