Posted to commits@lucene.apache.org by kr...@apache.org on 2017/01/27 15:19:55 UTC

[09/14] lucene-solr:jira/solr-8593: SOLR-5944: In-place updates of Numeric DocValues

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53754108/solr/core/src/test/org/apache/solr/update/PeerSyncTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/update/PeerSyncTest.java b/solr/core/src/test/org/apache/solr/update/PeerSyncTest.java
index 8f3a89a..fed30a1 100644
--- a/solr/core/src/test/org/apache/solr/update/PeerSyncTest.java
+++ b/solr/core/src/test/org/apache/solr/update/PeerSyncTest.java
@@ -16,21 +16,29 @@
  */
 package org.apache.solr.update;
 
+import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.LinkedHashSet;
+import java.util.Set;
+
 import org.apache.solr.BaseDistributedSearchTestCase;
 import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.schema.IndexSchema;
+import org.apache.solr.update.processor.DistributedUpdateProcessor;
+import org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase;
 import org.junit.Test;
-
-import java.io.IOException;
-import java.util.Arrays;
-
-import static org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase;
-import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
+import static org.junit.internal.matchers.StringContains.containsString;
 
 @SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
 public class PeerSyncTest extends BaseDistributedSearchTestCase {
@@ -46,11 +54,24 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     // TODO: a better way to do this?
     configString = "solrconfig-tlog.xml";
     schemaString = "schema.xml";
+
+    // validate that the schema was not changed to an unexpected state
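+    // (per SOLR-5944, in-place updates are only possible for single-valued numeric
+    // fields that have docValues enabled but are neither indexed nor stored, so the
+    // tests below depend on these assumptions holding)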
+    try {
+      initCore(configString, schemaString);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertTrue(schema.getFieldOrNull("_version_").hasDocValues() && !schema.getFieldOrNull("_version_").indexed()
+        && !schema.getFieldOrNull("_version_").stored());
+    assertTrue(!schema.getFieldOrNull("val_i_dvo").indexed() && !schema.getFieldOrNull("val_i_dvo").stored() &&
+        schema.getFieldOrNull("val_i_dvo").hasDocValues());
   }
 
   @Test
   @ShardsFixed(num = 3)
   public void test() throws Exception {
+    Set<Integer> docsAdded = new LinkedHashSet<>();
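+    // tracks the ids of all docs expected to survive the adds/deletes below, so that
+    // query results can be checked against this set after each sync (see validateQACResponse)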
     handle.clear();
     handle.put("timestamp", SKIPVAL);
     handle.put("score", SKIPVAL);
@@ -91,14 +112,17 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     add(client0, seenLeader, addRandFields(sdoc("id","8","_version_",++v)));
     add(client0, seenLeader, addRandFields(sdoc("id","9","_version_",++v)));
     add(client0, seenLeader, addRandFields(sdoc("id","10","_version_",++v)));
-
+    for (int i=0; i<10; i++) docsAdded.add(i+1);
     assertSync(client1, numVersions, true, shardsArr[0]);
 
-    client0.commit(); client1.commit(); queryAndCompare(params("q", "*:*"), client0, client1);
+    client0.commit(); client1.commit();
+    QueryResponse qacResponse = queryAndCompare(params("q", "*:*", "rows", "10000"), client0, client1);
+    validateQACResponse(docsAdded, qacResponse);
 
     int toAdd = (int)(numVersions *.95);
     for (int i=0; i<toAdd; i++) {
       add(client0, seenLeader, sdoc("id",Integer.toString(i+11),"_version_",v+i+1));
+      docsAdded.add(i+11);
     }
 
     // sync should fail since there's not enough overlap to give us confidence
@@ -111,19 +135,24 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     }
 
     assertSync(client1, numVersions, true, shardsArr[0]);
-    client0.commit(); client1.commit(); queryAndCompare(params("q", "*:*", "sort","_version_ desc"), client0, client1);
+    client0.commit(); client1.commit();
+    qacResponse = queryAndCompare(params("q", "*:*", "rows", "10000", "sort","_version_ desc"), client0, client1);
+    validateQACResponse(docsAdded, qacResponse);
 
     // test delete and deleteByQuery
     v=1000;
-    add(client0, seenLeader, sdoc("id","1000","_version_",++v));
+    SolrInputDocument doc = sdoc("id","1000","_version_",++v);
+    add(client0, seenLeader, doc);
     add(client0, seenLeader, sdoc("id","1001","_version_",++v));
     delQ(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_",Long.toString(-++v)), "id:1001 OR id:1002");
     add(client0, seenLeader, sdoc("id","1002","_version_",++v));
     del(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_",Long.toString(-++v)), "1000");
+    docsAdded.add(1002); // 1002 added
 
     assertSync(client1, numVersions, true, shardsArr[0]);
-    client0.commit(); client1.commit(); 
-    queryAndCompare(params("q", "*:*", "sort","_version_ desc"), client0, client1);
+    client0.commit(); client1.commit();
+    qacResponse = queryAndCompare(params("q", "*:*", "rows", "10000", "sort","_version_ desc"), client0, client1);
+    validateQACResponse(docsAdded, qacResponse);
 
     // test that delete by query is returned even if not requested, and that it doesn't delete newer stuff than it should
     v=2000;
@@ -133,6 +162,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     delQ(client, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_",Long.toString(-++v)), "id:2001 OR id:2002");
     add(client, seenLeader, sdoc("id","2002","_version_",++v));
     del(client, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_",Long.toString(-++v)), "2000");
+    docsAdded.add(2002); // 2002 added
 
     v=2000;
     client = client1;
@@ -144,7 +174,9 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     del(client, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_",Long.toString(-++v)), "2000");
 
     assertSync(client1, numVersions, true, shardsArr[0]);
-    client0.commit(); client1.commit(); queryAndCompare(params("q", "*:*", "sort","_version_ desc"), client0, client1);
+    client0.commit(); client1.commit();
+    qacResponse = queryAndCompare(params("q", "*:*", "rows", "10000", "sort","_version_ desc"), client0, client1);
+    validateQACResponse(docsAdded, qacResponse);
 
     //
     // Test that handling reorders work when applying docs retrieved from peer
@@ -155,6 +187,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     add(client0, seenLeader, sdoc("id","3000","_version_",3001));
     add(client1, seenLeader, sdoc("id","3000","_version_",3001));
     del(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_","3000"),  "3000");
+    docsAdded.add(3000);
 
+    // this should cause us to retrieve an add that was previously deleted
     add(client0, seenLeader, sdoc("id","3001","_version_",3003));
@@ -165,17 +198,23 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     add(client0, seenLeader, sdoc("id","3002","_version_",3004));
     add(client0, seenLeader, sdoc("id","3002","_version_",3005));
     add(client1, seenLeader, sdoc("id","3002","_version_",3005));
-
+    docsAdded.add(3001); // 3001 added
+    docsAdded.add(3002); // 3002 added
+    
     assertSync(client1, numVersions, true, shardsArr[0]);
-    client0.commit(); client1.commit(); queryAndCompare(params("q", "*:*", "sort","_version_ desc"), client0, client1);
+    client0.commit(); client1.commit();
+    qacResponse = queryAndCompare(params("q", "*:*", "rows", "10000", "sort","_version_ desc"), client0, client1);
+    validateQACResponse(docsAdded, qacResponse);
 
+    // now let's check that fingerprinting causes appropriate failures
     v = 4000;
     add(client0, seenLeader, sdoc("id",Integer.toString((int)v),"_version_",v));
+    docsAdded.add(4000);
     toAdd = numVersions+10;
     for (int i=0; i<toAdd; i++) {
       add(client0, seenLeader, sdoc("id",Integer.toString((int)v+i+1),"_version_",v+i+1));
       add(client1, seenLeader, sdoc("id",Integer.toString((int)v+i+1),"_version_",v+i+1));
+      docsAdded.add((int)v+i+1);
     }
 
     // client0 now has an additional add beyond our window and the fingerprint should cause this to fail
@@ -198,14 +237,95 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
       add(client0, seenLeader, sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
     }
     assertSync(client1, numVersions, true, shardsArr[0]);
+    
+    client0.commit(); client1.commit();
+    qacResponse = queryAndCompare(params("q", "*:*", "rows", "10000", "sort","_version_ desc"), client0, client1);
+    validateQACResponse(docsAdded, qacResponse);
+
+    // let's add some in-place updates
+    add(client0, seenLeader, sdoc("id", "5000", "val_i_dvo", 0, "title", "mytitle", "_version_", 5000)); // full update
+    docsAdded.add(5000);
+    assertSync(client1, numVersions, true, shardsArr[0]);
+    // verify the in-place updated document (id=5000) has correct fields
+    assertEquals(0, client1.getById("5000").get("val_i_dvo"));
+    assertEquals(client0.getById("5000")+" and "+client1.getById("5000"), 
+        "mytitle", client1.getById("5000").getFirstValue("title"));
+
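+    // DISTRIB_INPLACE_PREVVERSION identifies the _version_ this in-place update
+    // depends on; a replica that hasn't seen that version yet must obtain it before
+    // applying the partial update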
+    ModifiableSolrParams inPlaceParams = new ModifiableSolrParams(seenLeader);
+    inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5000");
+    add(client0, inPlaceParams, sdoc("id", "5000", "val_i_dvo", 1, "_version_", 5001)); // in-place update
+    assertSync(client1, numVersions, true, shardsArr[0]);
+    // verify the in-place updated document (id=5000) has correct fields
+    assertEquals(1, client1.getById("5000").get("val_i_dvo"));
+    assertEquals(client0.getById("5000")+" and "+client1.getById("5000"), 
+        "mytitle", client1.getById("5000").getFirstValue("title"));
+
+    // interleave the in-place updates with a few deletes to other documents
+    del(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_","5002"),  4001);
+    delQ(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_","5003"),  "id:4002");
+    docsAdded.remove(4001);
+    docsAdded.remove(4002);
+    
+    inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5001");
+    add(client0, inPlaceParams, sdoc("id", 5000, "val_i_dvo", 2, "_version_", 5004)); // in-place update
+    assertSync(client1, numVersions, true, shardsArr[0]);
+    // verify the in-place updated document (id=5000) has correct fields
+    assertEquals(2, client1.getById("5000").get("val_i_dvo"));
+    assertEquals(client0.getById("5000")+" and "+client1.getById("5000"), 
+        "mytitle", client1.getById("5000").getFirstValue("title"));
 
-  }
+    // a DBQ matching on the in-place updated value
+    delQ(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_","5005"),  "val_i_dvo:1"); // current val is 2, so this should not delete anything
+    assertSync(client1, numVersions, true, shardsArr[0]);
 
+    boolean deleteTheUpdatedDocument = random().nextBoolean();
+    if (deleteTheUpdatedDocument) { // if doc with id=5000 is deleted, further in-place-updates should fail
+      delQ(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_","5006"),  "val_i_dvo:2"); // current val is 2, this will delete id=5000
+      assertSync(client1, numVersions, true, shardsArr[0]);
+      SolrException ex = expectThrows(SolrException.class, () -> {
+        inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5004");
+        add(client0, inPlaceParams, sdoc("id", 5000, "val_i_dvo", 3, "_version_", 5007));
+      });
+      assertEquals(ex.toString(), SolrException.ErrorCode.SERVER_ERROR.code, ex.code());
+      assertThat(ex.getMessage(), containsString("Can't find document with id=5000"));
+    } else {
+      inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5004");
+      add(client0, inPlaceParams, sdoc("id", 5000, "val_i_dvo", 3, "_version_", 5006));
+      assertSync(client1, numVersions, true, shardsArr[0]);
+
+      // verify the in-place updated document (id=5000) has correct fields
+      assertEquals(3, client1.getById("5000").get("val_i_dvo"));
+      assertEquals(client0.getById("5000")+" and "+client1.getById("5000"), 
+          "mytitle", client1.getById("5000").getFirstValue("title"));
+
+      if (random().nextBoolean()) {
+        client0.commit(); client1.commit();
+        qacResponse = queryAndCompare(params("q", "*:*", "rows", "10000", "sort","_version_ desc"), client0, client1);
+        validateQACResponse(docsAdded, qacResponse);
+      }
+      del(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_","5007"),  5000);
+      docsAdded.remove(5000);
+      assertSync(client1, numVersions, true, shardsArr[0]);
+
+      client0.commit(); client1.commit();
+      qacResponse = queryAndCompare(params("q", "*:*", "rows", "10000", "sort","_version_ desc"), client0, client1);
+      validateQACResponse(docsAdded, qacResponse);
+    }
+  }
 
   void assertSync(SolrClient client, int numVersions, boolean expectedResult, String... syncWith) throws IOException, SolrServerException {
     QueryRequest qr = new QueryRequest(params("qt","/get", "getVersions",Integer.toString(numVersions), "sync", StrUtils.join(Arrays.asList(syncWith), ',')));
     NamedList rsp = client.request(qr);
     assertEquals(expectedResult, (Boolean) rsp.get("sync"));
   }
+  
+  void validateQACResponse(Set<Integer> docsAdded, QueryResponse qacResponse) {
+    Set<Integer> qacDocs = new LinkedHashSet<>();
+    for (int i=0; i<qacResponse.getResults().size(); i++) {
+      qacDocs.add(Integer.parseInt(qacResponse.getResults().get(i).getFieldValue("id").toString()));
+    }
+    assertEquals(docsAdded, qacDocs);
+    assertEquals(docsAdded.size(), qacResponse.getResults().getNumFound());
+  }
 
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53754108/solr/core/src/test/org/apache/solr/update/SolrIndexConfigTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/update/SolrIndexConfigTest.java b/solr/core/src/test/org/apache/solr/update/SolrIndexConfigTest.java
index 7d1c4c7..0f53f33 100644
--- a/solr/core/src/test/org/apache/solr/update/SolrIndexConfigTest.java
+++ b/solr/core/src/test/org/apache/solr/update/SolrIndexConfigTest.java
@@ -99,8 +99,8 @@ public class SolrIndexConfigTest extends SolrTestCaseJ4 {
   }
 
   public void testSortingMPSolrIndexConfigCreation() throws Exception {
-    final String expectedFieldName = "timestamp";
-    final SortField.Type expectedFieldType = SortField.Type.LONG;
+    final String expectedFieldName = "timestamp_i_dvo";
+    final SortField.Type expectedFieldType = SortField.Type.INT;
     final boolean expectedFieldSortDescending = true;
 
     SolrConfig solrConfig = new SolrConfig(instanceDir, solrConfigFileNameSortingMergePolicyFactory, null);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53754108/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java b/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
new file mode 100644
index 0000000..74360e3
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
@@ -0,0 +1,1101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.lucene.index.LogDocMergePolicy;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.request.schema.SchemaRequest.Field;
+import org.apache.solr.client.solrj.response.UpdateResponse;
+import org.apache.solr.client.solrj.response.schema.SchemaResponse.FieldResponse;
+import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrDocumentList;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.index.LogDocMergePolicyFactory;
+import org.apache.solr.update.processor.DistributedUpdateProcessor;
+import org.apache.solr.util.DefaultSolrThreadFactory;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tests in-place updates (docValues updates) on a one-shard, three-replica cluster.
+ */
+@Slow
+public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @BeforeClass
+  public static void beforeSuperClass() throws Exception {
+    System.setProperty("solr.tests.intClassName", random().nextBoolean()? "TrieIntField": "IntPointField");
+    System.setProperty("solr.tests.longClassName", random().nextBoolean()? "TrieLongField": "LongPointField");
+    System.setProperty("solr.tests.floatClassName", random().nextBoolean()? "TrieFloatField": "FloatPointField");
+    System.setProperty("solr.tests.doubleClassName", random().nextBoolean()? "TrieDoubleField": "DoublePointField");
+
+    schemaString = "schema-inplace-updates.xml";
+    configString = "solrconfig-tlog.xml";
+
+    // we need consistent segments that aren't re-ordered on merge because we're
+    // asserting inplace updates happen by checking the internal [docid]
+    systemSetPropertySolrTestsMergePolicy(LogDocMergePolicy.class.getName());
+    systemSetPropertySolrTestsMergePolicyFactory(LogDocMergePolicyFactory.class.getName());
+    
+    initCore(configString, schemaString);
+    
+    // sanity check that autocommits are disabled
+    assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoCommmitMaxTime);
+    assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoSoftCommmitMaxTime);
+    assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoCommmitMaxDocs);
+    assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoSoftCommmitMaxDocs);
+  }
+  
+  @After
+  public void after() {
+    System.clearProperty("solr.tests.intClassName");
+    System.clearProperty("solr.tests.longClassName");
+    System.clearProperty("solr.tests.floatClassName");
+    System.clearProperty("solr.tests.doubleClassName");
+  }
+
+  public TestInPlaceUpdatesDistrib() throws Exception {
+    super();
+    sliceCount = 1;
+    fixShardCount(3);
+  }
+
+  private SolrClient LEADER = null;
+  private List<SolrClient> NONLEADERS = null;
+  
+  @Test
+  @ShardsFixed(num = 3)
+  @SuppressWarnings("unchecked")
+  public void test() throws Exception {
+    waitForRecoveriesToFinish(true);
+    mapReplicasToClients();
+    
+    // sanity check no one broke the assumptions we make about our schema
+    checkExpectedSchemaField(map("name", "inplace_updatable_int",
+        "type","int",
+        "stored",Boolean.FALSE,
+        "indexed",Boolean.FALSE,
+        "docValues",Boolean.TRUE));
+    checkExpectedSchemaField(map("name", "inplace_updatable_float",
+        "type","float",
+        "stored",Boolean.FALSE,
+        "indexed",Boolean.FALSE,
+        "docValues",Boolean.TRUE));
+    checkExpectedSchemaField(map("name", "_version_",
+        "type","long",
+        "stored",Boolean.FALSE,
+        "indexed",Boolean.FALSE,
+        "docValues",Boolean.TRUE));
+
+    // Do the tests now:
+    testDBQUsingUpdatedFieldFromDroppedUpdate();
+    outOfOrderDBQsTest();
+    docValuesUpdateTest();
+    ensureRtgWorksWithPartialUpdatesTest();
+    delayedReorderingFetchesMissingUpdateFromLeaderTest();
+    outOfOrderUpdatesIndividualReplicaTest();
+    outOfOrderDeleteUpdatesIndividualReplicaTest();
+    reorderedDBQsWithInPlaceUpdatesShouldNotThrowReplicaInLIRTest();
+  }
+  
+  private void mapReplicasToClients() throws KeeperException, InterruptedException {
+    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    zkStateReader.forceUpdateCollection(DEFAULT_COLLECTION);
+    ClusterState clusterState = zkStateReader.getClusterState();
+    Slice shard1 = clusterState.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1);
+    Replica leader = shard1.getLeader();
+
+    String leaderBaseUrl = zkStateReader.getBaseUrlForNodeName(leader.getNodeName());
+    for (int i=0; i<clients.size(); i++) {
+      if (((HttpSolrClient)clients.get(i)).getBaseURL().startsWith(leaderBaseUrl))
+        LEADER = clients.get(i);
+    }
+    
+    NONLEADERS = new ArrayList<>();
+    for (Replica rep: shard1.getReplicas()) {
+      if (rep.equals(leader)) {
+        continue;
+      }
+      String baseUrl = zkStateReader.getBaseUrlForNodeName(rep.getNodeName());
+      for (int i=0; i<clients.size(); i++) {
+        if (((HttpSolrClient)clients.get(i)).getBaseURL().startsWith(baseUrl))
+          NONLEADERS.add(clients.get(i));
+      }
+    }
+    
+    assertNotNull(LEADER);
+    assertEquals(2, NONLEADERS.size());
+  }
+
+  final int NUM_RETRIES = 100, WAIT_TIME = 10;
+
+  // The following should work: full update to doc 0, in-place update for doc 0, delete doc 0
+  private void outOfOrderDBQsTest() throws Exception {
+    
+    del("*:*");
+    commit();
+    
+    buildRandomIndex(0);
+
+    float inplace_updatable_float = 1;
+
+    // update doc, set
+    index("id", 0, "inplace_updatable_float", map("set", inplace_updatable_float));
+
+    LEADER.commit();
+    SolrDocument sdoc = LEADER.getById("0");  // RTG straight from the index
+    assertEquals(inplace_updatable_float, sdoc.get("inplace_updatable_float"));
+    assertEquals("title0", sdoc.get("title_s"));
+    long version0 = (long) sdoc.get("_version_");
+
+    // put replica out of sync
+    float newinplace_updatable_float = 100;
+    List<UpdateRequest> updates = new ArrayList<>();
+    updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", "inplace_updatable_float", newinplace_updatable_float, "_version_", version0 + 1)); // full update
+    updates.add(simulatedUpdateRequest(version0 + 1, "id", 0, "inplace_updatable_float", newinplace_updatable_float + 1, "_version_", version0 + 2)); // inplace_updatable_float=101
+    updates.add(simulatedDeleteRequest(0, version0 + 3));
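+    // these three updates form a dependency chain (full add -> in-place update ->
+    // delete); whatever order a replica receives them in, it must converge on the
+    // document being deleted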
+
+    // order the updates correctly for NONLEADER 1
+    for (UpdateRequest update : updates) {
+      log.info("Issuing well ordered update: " + update.getDocuments());
+      NONLEADERS.get(1).request(update);
+    }
+
+    // Reordering needs to happen using parallel threads
+    ExecutorService threadpool = 
+        ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
+
+    // re-order the updates for NONLEADER 0
+    List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
+    Collections.shuffle(reorderedUpdates, random());
+    List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
+    for (UpdateRequest update : reorderedUpdates) {
+      AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
+      updateResponses.add(threadpool.submit(task));
+      // we can't guarantee/trust what order the updates are executed in, since multiple threads
+      // are involved, but we try to bias the thread scheduling to run them in the order submitted
+      Thread.sleep(10);
+    }
+    
+    threadpool.shutdown();
+    assertTrue("Thread pool didn't terminate within 10 secs", threadpool.awaitTermination(10, TimeUnit.SECONDS));
+    
+    // assert all requests were successful
+    for (Future<UpdateResponse> resp: updateResponses) {
+      assertEquals(0, resp.get().getStatus());
+    }
+
+    // assert both replicas have same effect
+    for (SolrClient client : NONLEADERS) { // 0th is re-ordered replica, 1st is well-ordered replica
+      SolrDocument doc = client.getById(String.valueOf(0), params("distrib", "false"));
+      assertNull("This doc was supposed to have been deleted, but was: " + doc, doc);
+    }
+
+    log.info("outOfOrderDeleteUpdatesIndividualReplicaTest: This test passed fine...");
+    del("*:*");
+    commit();
+  }
+
+  private void docValuesUpdateTest() throws Exception {
+    del("*:*");
+    commit();
+
+    // number of docs we're testing (0 <= id), index may contain additional random docs (id < 0)
+    final int numDocs = atLeast(100);
+    log.info("Trying num docs = " + numDocs);
+    final List<Integer> ids = new ArrayList<Integer>(numDocs);
+    for (int id = 0; id < numDocs; id++) {
+      ids.add(id);
+    }
+      
+    buildRandomIndex(101.0F, ids);
+    
+    List<Integer> luceneDocids = new ArrayList<>(numDocs);
+    List<Float> valuesList = new ArrayList<Float>(numDocs);
+    SolrParams params = params("q", "id:[0 TO *]", "fl", "*,[docid]", "rows", String.valueOf(numDocs), "sort", "id_i asc");
+    SolrDocumentList results = LEADER.query(params).getResults();
+    assertEquals(numDocs, results.size());
+    for (SolrDocument doc : results) {
+      luceneDocids.add((int) doc.get("[docid]"));
+      valuesList.add((Float) doc.get("inplace_updatable_float"));
+    }
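+    // the [docid] values captured here are compared again after each update round:
+    // a true in-place update must leave the internal Lucene docid unchanged, whereas
+    // re-indexing the doc would assign it a new one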
+    log.info("Initial results: "+results);
+    
+    // before we do any atomic operations, sanity check our results against all clients
+    assertDocIdsAndValuesAgainstAllClients("sanitycheck", params, luceneDocids, valuesList);
+
+    // now we're going to overwrite the value for all of our testing docs
+    // giving them a value between -5 and +5
+    for (int id : ids) {
+      // NOTE: in rare cases, this may be setting the value to 0, on a doc that
+      // already had an init value of 0 -- which is an interesting edge case, so we don't exclude it
+      final float multiplier = random().nextBoolean() ? -5.0F : 5.0F;
+      final float value = r.nextFloat() * multiplier;
+      assert -5.0F <= value && value <= 5.0F;
+      valuesList.set(id, value);
+    }
+    log.info("inplace_updatable_float: " + valuesList);
+    
+    // update doc w/ set
+    Collections.shuffle(ids, random()); // so updates aren't applied in index order
+    for (int id : ids) {
+      index("id", id, "inplace_updatable_float", map("set", valuesList.get(id)));
+    }
+
+    commit();
+
+    assertDocIdsAndValuesAgainstAllClients
+      ("set", SolrParams.wrapDefaults(params("q", "inplace_updatable_float:[-5.0 TO 5.0]",
+                                             "fq", "id:[0 TO *]"),
+                                      // existing sort & fl that we want...
+                                      params),
+       luceneDocids, valuesList);
+      
+    // update doc, w/increment
+    log.info("Updating the documents...");
+    Collections.shuffle(ids, random()); // so updates aren't applied in the same order as our 'set'
+    for (int id : ids) {
+      // all increments will use some value X such that 20 < abs(X)
+      // thus ensuring that after all increments are done, there should be
+      // 0 test docs matching the query inplace_updatable_float:[-10 TO 10]
+      final float inc = (r.nextBoolean() ? -1.0F : 1.0F) * (random().nextFloat() + (float)atLeast(20));
+      assert 20 < Math.abs(inc);
+      final float value = valuesList.get(id) + inc;
+      assert value < -10 || 10 < value;
+        
+      valuesList.set(id, value);
+      index("id", id, "inplace_updatable_float", map("inc", inc));
+    }
+    commit();
+    
+    assertDocIdsAndValuesAgainstAllClients
+      ("inc", SolrParams.wrapDefaults(params("q", "-inplace_updatable_float:[-10.0 TO 10.0]",
+                                             "fq", "id:[0 TO *]"),
+                                      // existing sort & fl that we want...
+                                      params),
+       luceneDocids, valuesList);
+  }
+
+  /**
+   * Retries the specified 'req' against each SolrClient in "clients" until the expected number of
+   * results is returned, at which point the results are verified using assertDocIdsAndValuesInResults
+   *
+   * @param debug used in log and assertion messages
+   * @param req the query to execute; should include rows &amp; sort params such that the results can be compared to luceneDocids and valuesList
+   * @param luceneDocids a list of "[docid]" values to be tested against each doc in the req results (in order)
+   * @param valuesList a list of "inplace_updatable_float" values to be tested against each doc in the req results (in order)
+   */
+  private void assertDocIdsAndValuesAgainstAllClients(final String debug,
+                                                      final SolrParams req,
+                                                      final List<Integer> luceneDocids,
+                                                      final List<Float> valuesList) throws Exception {
+    assert luceneDocids.size() == valuesList.size();
+    final long numFoundExpected = luceneDocids.size();
+    
+    CLIENT: for (SolrClient client : clients) {
+      final String clientDebug = client.toString() + (LEADER.equals(client) ? " (leader)" : " (not leader)");
+      final String msg = "'"+debug+"' results against client: " + clientDebug;
+      SolrDocumentList results = null;
+      // For each client, do a (sorted) sanity check query to confirm searcher has been re-opened
+      // after our update -- if the numFound matches our expectations, then verify the inplace float
+      // value and [docid] of each result doc against our expectations to ensure that the values were
+      // updated properly w/o the doc being completely re-added internally. (ie: truly in-place)
+      RETRY: for (int attempt = 0; attempt <= NUM_RETRIES; attempt++) {
+        log.info("Attempt #{} checking {}", attempt, msg);
+        results = client.query(req).getResults();
+        if (numFoundExpected == results.getNumFound()) {
+          break RETRY;
+        }
+        if (attempt == NUM_RETRIES) {
+          fail("Repeated retry for "+msg+"; Never got numFound="+numFoundExpected+"; results=> "+results);
+        }
+        log.info("numFound missmatch, searcher may not have re-opened yet.  Will sleep an retry...");
+        Thread.sleep(WAIT_TIME);          
+      }
+      
+      assertDocIdsAndValuesInResults(msg, results, luceneDocids, valuesList);
+    }
+  }
+  
+  /**
+   * Given a result list sorted by "id", asserts that the "[docid]" and "inplace_updatable_float" values 
+   * for each document match in order.
+   *
+   * @param msgPre used as a prefix for assertion messages
+   * @param results the sorted results of some query, such that all matches are included (ie: rows = numFound)
+   * @param luceneDocids a list of "[docid]" values to be tested against each doc in results (in order)
+   * @param valuesList a list of "inplace_updatable_float" values to be tested against each doc in results (in order)
+   */
+  private void assertDocIdsAndValuesInResults(final String msgPre,
+                                              final SolrDocumentList results,
+                                              final List<Integer> luceneDocids,
+                                              final List<Float> valuesList) {
+
+    assert luceneDocids.size() == valuesList.size();
+    assertEquals(msgPre + ": rows param wasn't big enough, we need to compare all results matching the query",
+                 results.getNumFound(), results.size());
+    assertEquals(msgPre + ": didn't get a result for every known docid",
+                 luceneDocids.size(), results.size());
+    
+    for (SolrDocument doc : results) {
+      final int id = Integer.parseInt(doc.get("id").toString());
+      final Object val = doc.get("inplace_updatable_float");
+      final Object docid = doc.get("[docid]");
+      assertEquals(msgPre + " wrong val for " + doc.toString(), valuesList.get(id), val);
+      assertEquals(msgPre + " wrong [docid] for " + doc.toString(), luceneDocids.get(id), docid);
+    }
+  }
+  
+  
+  private void ensureRtgWorksWithPartialUpdatesTest() throws Exception {
+    del("*:*");
+    commit();
+
+    float inplace_updatable_float = 1;
+    String title = "title100";
+    long version = 0, currentVersion;
+
+    currentVersion = buildRandomIndex(100).get(0);
+    assertTrue(currentVersion > version);
+
+    // do an initial (non-inplace) update to ensure both the float & int fields we care about have (any) value
+    // that way all subsequent atomic updates will be inplace
+    currentVersion = addDocAndGetVersion("id", 100,
+                                         "inplace_updatable_float", map("set", random().nextFloat()),
+                                         "inplace_updatable_int", map("set", random().nextInt()));
+    LEADER.commit();
+    
+    // get the internal docids of id=100 document from the three replicas
+    List<Integer> docids = getInternalDocIds("100");
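+    // these [docid]s must remain unchanged by the in-place updates below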
+
+    // update doc, set
+    currentVersion = addDocAndGetVersion("id", 100, "inplace_updatable_float", map("set", inplace_updatable_float));
+    assertTrue(currentVersion > version);
+    version = currentVersion;
+    LEADER.commit();
+    assertTrue("Earlier: "+docids+", now: "+getInternalDocIds("100"), docids.equals(getInternalDocIds("100")));
+    
+    SolrDocument sdoc = LEADER.getById("100");  // RTG straight from the index
+    assertEquals(sdoc.toString(), (float) inplace_updatable_float, sdoc.get("inplace_updatable_float"));
+    assertEquals(sdoc.toString(), title, sdoc.get("title_s"));
+    assertEquals(sdoc.toString(), version, sdoc.get("_version_"));
+
+    if (random().nextBoolean()) {
+      title = "newtitle100";
+      currentVersion = addDocAndGetVersion("id", 100, "title_s", title, "inplace_updatable_float", inplace_updatable_float); // full indexing
+      assertTrue(currentVersion > version);
+      version = currentVersion;
+
+      sdoc = LEADER.getById("100");  // RTG from the tlog
+      assertEquals(sdoc.toString(), (float) inplace_updatable_float, sdoc.get("inplace_updatable_float"));
+      assertEquals(sdoc.toString(), title, sdoc.get("title_s"));
+      assertEquals(sdoc.toString(), version, sdoc.get("_version_"));
+
+      // we've done a full index, so we need to update the [docid] for each replica
+      LEADER.commit(); // can't get (real) [docid] from the tlogs, need to force a commit
+      docids = getInternalDocIds("100");
+    }
+
+    inplace_updatable_float++;
+    currentVersion = addDocAndGetVersion("id", 100, "inplace_updatable_float", map("inc", 1));
+    assertTrue(currentVersion > version);
+    version = currentVersion;
+    LEADER.commit();
+    assertTrue("Earlier: "+docids+", now: "+getInternalDocIds("100"), docids.equals(getInternalDocIds("100")));
+    
+    currentVersion = addDocAndGetVersion("id", 100, "inplace_updatable_int", map("set", "100"));
+    assertTrue(currentVersion > version);
+    version = currentVersion;
+
+    inplace_updatable_float++;
+    currentVersion = addDocAndGetVersion("id", 100, "inplace_updatable_float", map("inc", 1));
+    assertTrue(currentVersion > version);
+    version = currentVersion;
+
+    // RTG from tlog(s)
+    for (SolrClient client : clients) {
+      final String clientDebug = client.toString() + (LEADER.equals(client) ? " (leader)" : " (not leader)");
+      sdoc = client.getById("100", params("distrib", "false"));
+
+      assertEquals(clientDebug + " => "+ sdoc, (int) 100, sdoc.get("inplace_updatable_int"));
+      assertEquals(clientDebug + " => "+ sdoc, (float) inplace_updatable_float, sdoc.get("inplace_updatable_float"));
+      assertEquals(clientDebug + " => "+ sdoc, title, sdoc.get("title_s"));
+      assertEquals(clientDebug + " => "+ sdoc, version, sdoc.get("_version_"));
+    }
+    
+    // assert that the internal docid for id=100 document remains same, in each replica, as before
+    LEADER.commit(); // can't get (real) [docid] from the tlogs, need to force a commit
+    assertTrue("Earlier: "+docids+", now: "+getInternalDocIds("100"), docids.equals(getInternalDocIds("100")));
+  }
+
+  /**
+   * Returns the "[docid]" value(s) returned from a non-distrib RTG to each of the clients used 
+   * in this test (in the same order as the clients list)
+   */
+  private List<Integer> getInternalDocIds(String id) throws SolrServerException, IOException {
+    List<Integer> ret = new ArrayList<>(clients.size());
+    for (SolrClient client : clients) {
+      SolrDocument doc = client.getById(id, params("distrib", "false", "fl", "[docid]"));
+      Object docid = doc.get("[docid]");
+      assertNotNull(docid);
+      assertEquals(Integer.class, docid.getClass());
+      ret.add((Integer) docid);
+    }
+    assert clients.size() == ret.size();
+    return ret;
+  }
+
+  private void outOfOrderUpdatesIndividualReplicaTest() throws Exception {
+    
+    del("*:*");
+    commit();
+
+    buildRandomIndex(0);
+
+    float inplace_updatable_float = 1;
+    // update doc, set
+    index("id", 0, "inplace_updatable_float", map("set", inplace_updatable_float));
+
+    LEADER.commit();
+    SolrDocument sdoc = LEADER.getById("0");  // RTG straight from the index
+    assertEquals(inplace_updatable_float, sdoc.get("inplace_updatable_float"));
+    assertEquals("title0", sdoc.get("title_s"));
+    long version0 = (long) sdoc.get("_version_");
+
+    // put replica out of sync
+    float newinplace_updatable_float = 100;
+    List<UpdateRequest> updates = new ArrayList<>();
+    updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", "inplace_updatable_float", newinplace_updatable_float, "_version_", version0 + 1)); // full update
+    for (int i=1; i<atLeast(3); i++) {
+      updates.add(simulatedUpdateRequest(version0 + i, "id", 0, "inplace_updatable_float", newinplace_updatable_float + i, "_version_", version0 + i + 1));
+    }
+
+    // order the updates correctly for NONLEADER 1
+    for (UpdateRequest update : updates) {
+      log.info("Issuing well ordered update: " + update.getDocuments());
+      NONLEADERS.get(1).request(update);
+    }
+
+    // Reordering needs to happen using parallel threads, since some of these updates will
+    // be blocking calls, waiting for previous updates on which they depend to arrive.
+    ExecutorService threadpool = 
+        ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
+
+    // re-order the updates for NONLEADER 0
+    List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
+    Collections.shuffle(reorderedUpdates, random());
+    List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
+    for (UpdateRequest update : reorderedUpdates) {
+      AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
+      updateResponses.add(threadpool.submit(task));
+      // we can't guarantee/trust what order the updates are executed in, since multiple threads
+      // are involved, but we try to bias the thread scheduling to run them in the order submitted
+      Thread.sleep(10);
+    }
+    
+    threadpool.shutdown();
+    assertTrue("Thread pool didn't terminate within 10 secs", threadpool.awaitTermination(10, TimeUnit.SECONDS));
+
+    // assert all requests were successful
+    for (Future<UpdateResponse> resp: updateResponses) {
+      assertEquals(0, resp.get().getStatus());
+    }
+
+    // assert both replicas have same effect
+    for (SolrClient client : NONLEADERS) { // 0th is re-ordered replica, 1st is well-ordered replica
+      log.info("Testing client: " + ((HttpSolrClient)client).getBaseURL());
+      assertReplicaValue(client, 0, "inplace_updatable_float", (newinplace_updatable_float + (float)(updates.size() - 1)), 
+          "inplace_updatable_float didn't match for replica at client: " + ((HttpSolrClient)client).getBaseURL());
+      assertReplicaValue(client, 0, "title_s", "title0_new", 
+          "Title didn't match for replica at client: " + ((HttpSolrClient)client).getBaseURL());
+      assertEquals(version0 + updates.size(), getReplicaValue(client, 0, "_version_"));
+    }
+
+    log.info("outOfOrderUpdatesIndividualReplicaTest: This test passed fine...");
+    del("*:*");
+    commit();
+  }
+  
+  // The following should work: full update to doc 0, in-place update for doc 0, delete doc 0
+  private void outOfOrderDeleteUpdatesIndividualReplicaTest() throws Exception {
+    
+    del("*:*");
+    commit();
+
+    buildRandomIndex(0);
+
+    float inplace_updatable_float = 1;
+    // update doc, set
+    index("id", 0, "inplace_updatable_float", map("set", inplace_updatable_float));
+
+    LEADER.commit();
+    SolrDocument sdoc = LEADER.getById("0");  // RTG straight from the index
+    assertEquals(inplace_updatable_float, sdoc.get("inplace_updatable_float"));
+    assertEquals("title0", sdoc.get("title_s"));
+    long version0 = (long) sdoc.get("_version_");
+
+    // put replica out of sync
+    float newinplace_updatable_float = 100;
+    List<UpdateRequest> updates = new ArrayList<>();
+    updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", "inplace_updatable_float", newinplace_updatable_float, "_version_", version0 + 1)); // full update
+    updates.add(simulatedUpdateRequest(version0 + 1, "id", 0, "inplace_updatable_float", newinplace_updatable_float + 1, "_version_", version0 + 2)); // inplace_updatable_float=101
+    updates.add(simulatedDeleteRequest(0, version0 + 3));
+
+    // order the updates correctly for NONLEADER 1
+    for (UpdateRequest update : updates) {
+      log.info("Issuing well ordered update: " + update.getDocuments());
+      NONLEADERS.get(1).request(update);
+    }
+
+    // Reordering needs to happen using parallel threads
+    ExecutorService threadpool = 
+        ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
+
+    // re-order the updates for NONLEADER 0
+    List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
+    Collections.shuffle(reorderedUpdates, random());
+    List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
+    for (UpdateRequest update : reorderedUpdates) {
+      AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
+      updateResponses.add(threadpool.submit(task));
+      // we can't guarantee/trust what order the updates are executed in, since multiple threads
+      // are involved, but we try to bias the thread scheduling to run them in the order submitted
+      Thread.sleep(10);
+    }
+    
+    threadpool.shutdown();
+    assertTrue("Thread pool didn't terminate within 10 secs", threadpool.awaitTermination(10, TimeUnit.SECONDS));
+
+    // assert all requests were successful
+    for (Future<UpdateResponse> resp: updateResponses) {
+      assertEquals(0, resp.get().getStatus());
+    }
+
+    // assert both replicas have same effect
+    for (SolrClient client : NONLEADERS) { // 0th is re-ordered replica, 1st is well-ordered replica
+      SolrDocument doc = client.getById(String.valueOf(0), params("distrib", "false"));
+      assertNull("This doc was supposed to have been deleted, but was: " + doc, doc);
+    }
+
+    log.info("outOfOrderDeleteUpdatesIndividualReplicaTest: This test passed fine...");
+    del("*:*");
+    commit();
+  }
+
+  /* Test for a situation where a document requiring an in-place update cannot be "resurrected"
+   * when the original full indexed document has been deleted by an out-of-order DBQ.
+   * Expected behaviour in this case is that the replica fetches the full document and
+   * converges, rather than being thrown into LIR. Here's an example of the situation:
+        ADD(id=x, val=5, ver=1)
+        UPD(id=x, val=10, ver = 2)
+        DBQ(q=val:10, v=4)
+        DV(id=x, val=5, ver=3)
+   */
+  private void reorderedDBQsWithInPlaceUpdatesShouldNotThrowReplicaInLIRTest() throws Exception {
+    del("*:*");
+    commit();
+
+    buildRandomIndex(0);
+
+    SolrDocument sdoc = LEADER.getById("0");  // RTG straight from the index
+    //assertEquals(value, sdoc.get("inplace_updatable_float"));
+    assertEquals("title0", sdoc.get("title_s"));
+    long version0 = (long) sdoc.get("_version_");
+
+    String field = "inplace_updatable_int";
+    
+    // put replica out of sync
+    List<UpdateRequest> updates = new ArrayList<>();
+    updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", field, 5, "_version_", version0 + 1)); // full update
+    updates.add(simulatedUpdateRequest(version0 + 1, "id", 0, field, 10, "_version_", version0 + 2)); // inplace_updatable_int=10
+    updates.add(simulatedUpdateRequest(version0 + 2, "id", 0, field, 5, "_version_", version0 + 3)); // inplace_updatable_int=5
+    updates.add(simulatedDeleteRequest(field+":10", version0 + 4)); // supposed to not delete anything
+
+    // order the updates correctly for NONLEADER 1
+    for (UpdateRequest update : updates) {
+      log.info("Issuing well ordered update: " + update.getDocuments());
+      NONLEADERS.get(1).request(update);
+    }
+
+    // Reordering needs to happen using parallel threads
+    ExecutorService threadpool = 
+        ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
+    // re-order the last two updates for NONLEADER 0
+    List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
+    Collections.swap(reorderedUpdates, 2, 3);
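+    // after the swap, the DBQ (which matches the intermediate value 10) arrives before
+    // the in-place update that sets the value back to 5, so it deletes the doc and
+    // forces the subsequent in-place update to resurrect it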
+    
+    List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
+    for (UpdateRequest update : reorderedUpdates) {
+      // pretend this update is coming from the other non-leader, so that
+      // the resurrection can happen from there (instead of the leader)
+      update.setParam(DistributedUpdateProcessor.DISTRIB_FROM, ((HttpSolrClient)NONLEADERS.get(1)).getBaseURL());
+      AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0),
+                                                                         random().nextLong());
+      updateResponses.add(threadpool.submit(task));
+      // we can't guarantee/trust what order the updates are executed in, since multiple threads
+      // are involved, but we try to bias the thread scheduling to run them in the order submitted
+      Thread.sleep(10);
+    }
+    
+    threadpool.shutdown();
+    assertTrue("Thread pool didn't terminate within 10 secs", threadpool.awaitTermination(10, TimeUnit.SECONDS));
+
+    int successful = 0;
+    for (Future<UpdateResponse> resp: updateResponses) {
+      try {
+        UpdateResponse r = resp.get();
+        if (r.getStatus() == 0) {
+          successful++;
+        }
+      } catch (Exception ex) {
+        // reordered DBQ should trigger an error, thus throwing the replica into LIR.
+        // the cause of the error is that the full document was deleted by mistake due to the
+        // out of order DBQ, and the in-place update that arrives after the DBQ (but was supposed to 
+        // arrive before) cannot be applied, since the full document can't now be "resurrected".
+
+        if (!ex.getMessage().contains("Tried to fetch missing update"
+            + " from the leader, but missing wasn't present at leader.")) {
+          throw ex;
+        }
+      }
+    }
+    // All should succeed, i.e. no LIR
+    assertEquals(updateResponses.size(), successful);
+    
+    log.info("Non leader 0: "+((HttpSolrClient)NONLEADERS.get(0)).getBaseURL());
+    log.info("Non leader 1: "+((HttpSolrClient)NONLEADERS.get(1)).getBaseURL());
+    
+    SolrDocument doc0 = NONLEADERS.get(0).getById(String.valueOf(0), params("distrib", "false"));
+    SolrDocument doc1 = NONLEADERS.get(1).getById(String.valueOf(0), params("distrib", "false"));
+
+    log.info("Doc in both replica 0: "+doc0);
+    log.info("Doc in both replica 1: "+doc1);
+    // assert both replicas have same effect
+    for (int i=0; i<NONLEADERS.size(); i++) { // 0th is re-ordered replica, 1st is well-ordered replica
+      SolrClient client = NONLEADERS.get(i);
+      SolrDocument doc = client.getById(String.valueOf(0), params("distrib", "false"));
+      assertNotNull("Client: "+((HttpSolrClient)client).getBaseURL(), doc);
+      assertEquals("Client: "+((HttpSolrClient)client).getBaseURL(), 5, doc.getFieldValue(field));
+    }
+
+    log.info("reorderedDBQsWithInPlaceUpdatesShouldNotThrowReplicaInLIRTest: This test passed fine...");
+    del("*:*");
+    commit();
+  }
+  
+  private void delayedReorderingFetchesMissingUpdateFromLeaderTest() throws Exception {
+    del("*:*");
+    commit();
+    
+    float inplace_updatable_float = 1F;
+    buildRandomIndex(inplace_updatable_float, Collections.singletonList(1));
+
+    float newinplace_updatable_float = 100F;
+    List<UpdateRequest> updates = new ArrayList<>();
+    updates.add(regularUpdateRequest("id", 1, "title_s", "title1_new", "id_i", 1, "inplace_updatable_float", newinplace_updatable_float));
+    updates.add(regularUpdateRequest("id", 1, "inplace_updatable_float", map("inc", 1)));
+    updates.add(regularUpdateRequest("id", 1, "inplace_updatable_float", map("inc", 1)));
+
+    // The next request to replica2 will be delayed by 6 secs (timeout is 5s)
+    shardToJetty.get(SHARD1).get(1).jetty.getDebugFilter().addDelay(
+        "Waiting for dependant update to timeout", 1, 6000);
+
+    ExecutorService threadpool =
+        ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
+    for (UpdateRequest update : updates) {
+      AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, cloudClient,
+                                                                         random().nextLong());
+      threadpool.submit(task);
+
+      // we can't guarantee/trust what order the updates are executed in, since multiple threads
+      // are involved, but we try to bias the thread scheduling to run them in the order submitted
+      Thread.sleep(100); 
+    }
+
+    threadpool.shutdown();
+    assertTrue("Thread pool didn't terminate within 10 secs", threadpool.awaitTermination(10, TimeUnit.SECONDS));
+
+    commit();
+
+    // TODO: Could try checking ZK for LIR flags to ensure LIR has not kicked in
+    // Check every 10ms, 100 times, for a replica to go down (& assert that it doesn't)
+    for (int i=0; i<100; i++) {
+      Thread.sleep(10);
+      cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
+      ClusterState state = cloudClient.getZkStateReader().getClusterState();
+
+      int numActiveReplicas = 0;
+      for (Replica rep: state.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1).getReplicas())
+        if (rep.getState().equals(Replica.State.ACTIVE))
+          numActiveReplicas++;
+
+      assertEquals("The replica receiving reordered updates must not have gone down", 3, numActiveReplicas);
+    }
+
+    for (SolrClient client : clients) {
+      log.info("Testing client (Fetch missing test): " + ((HttpSolrClient)client).getBaseURL());
+      log.info("Version at " + ((HttpSolrClient)client).getBaseURL() + " is: " + getReplicaValue(client, 1, "_version_"));
+
+      assertReplicaValue(client, 1, "inplace_updatable_float", (newinplace_updatable_float + 2.0f), 
+          "inplace_updatable_float didn't match for replica at client: " + ((HttpSolrClient)client).getBaseURL());
+      assertReplicaValue(client, 1, "title_s", "title1_new", 
+          "Title didn't match for replica at client: " + ((HttpSolrClient)client).getBaseURL());
+    }
+    
+    // Try another round of these updates, this time with a delete request at the end.
+    // This is to ensure that fetching a missing update from the leader doesn't bomb out if the 
+    // document has been deleted on the leader later on
+    {
+      del("*:*");
+      commit();
+      shardToJetty.get(SHARD1).get(1).jetty.getDebugFilter().unsetDelay();
+      
+      updates.add(regularDeleteRequest(1));
+
+      shardToJetty.get(SHARD1).get(1).jetty.getDebugFilter().addDelay("Waiting for dependent update to timeout", 1, 5999); // the first update
+      shardToJetty.get(SHARD1).get(1).jetty.getDebugFilter().addDelay("Waiting for dependent update to timeout", 4, 5998); // the delete update
+
+      threadpool =
+          ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
+      for (UpdateRequest update : updates) {
+        AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, cloudClient,
+                                                                           random().nextLong());
+        threadpool.submit(task);
+        
+        // we can't guarantee/trust what order the updates are executed in, since multiple threads
+        // are involved, but we try to bias the thread scheduling to run them in the order submitted
+        Thread.sleep(100);
+      }
+
+      threadpool.shutdown();
+      assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));
+
+      commit();
+
+      // TODO: Could try checking ZK for LIR flags to ensure LIR has not kicked in
+      // Check every 10ms, 100 times, for a replica to go down (& assert that it doesn't)
+      ZkController zkController = shardToLeaderJetty.get(SHARD1).jetty.getCoreContainer().getZkController();
+      String lirPath = zkController.getLeaderInitiatedRecoveryZnodePath(DEFAULT_TEST_COLLECTION_NAME, SHARD1);
+      assertFalse(zkController.getZkClient().exists(lirPath, true));
+
+      for (int i=0; i<100; i++) {
+        Thread.sleep(10);
+        cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
+        ClusterState state = cloudClient.getZkStateReader().getClusterState();
+
+        int numActiveReplicas = 0;
+        for (Replica rep: state.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1).getReplicas())
+          if (rep.getState().equals(Replica.State.ACTIVE))
+            numActiveReplicas++;
+
+        assertEquals("The replica receiving reordered updates must not have gone down", 3, numActiveReplicas);
+      }
+
+      for (SolrClient client: new SolrClient[] {LEADER, NONLEADERS.get(0), 
+          NONLEADERS.get(1)}) { // nonleader 0 re-ordered replica, nonleader 1 well-ordered replica
+        SolrDocument doc = client.getById(String.valueOf(1), params("distrib", "false"));
+        assertNull("This doc was supposed to have been deleted, but was: " + doc, doc);
+      }
+
+    }
+    log.info("delayedReorderingFetchesMissingUpdateFromLeaderTest: This test passed fine...");
+  }
+
+  /**
+   * Use the schema API to verify that the specified field exists with exactly the expected attributes.
+   */
+  public void checkExpectedSchemaField(Map<String,Object> expected) throws Exception {
+    String fieldName = (String) expected.get("name");
+    assertNotNull("expected contains no name: " + expected, fieldName);
+    FieldResponse rsp = new Field(fieldName).process(this.cloudClient);
+    assertNotNull("Field Null Response: " + fieldName, rsp);
+    assertEquals("Field Status: " + fieldName + " => " + rsp.toString(), 0, rsp.getStatus());
+    assertEquals("Field: " + fieldName, expected, rsp.getField());
+  }
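+
+  // Illustrative usage (attribute values here are hypothetical, for demonstration only):
+  //   checkExpectedSchemaField(map("name", "inplace_updatable_float", "type", "float",
+  //       "stored", false, "indexed", false, "docValues", true));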
+
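+  /**
+   * Issues a single update against the given client, then commits roughly a third of the
+   * time (driven by the supplied seed), so that commits interleave unpredictably with the
+   * concurrently submitted (and possibly reordered) updates.
+   */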
+  private static class AsyncUpdateWithRandomCommit implements Callable<UpdateResponse> {
+    UpdateRequest update;
+    SolrClient solrClient;
+    final Random rnd;
+
+    public AsyncUpdateWithRandomCommit(UpdateRequest update, SolrClient solrClient, long seed) {
+      this.update = update;
+      this.solrClient = solrClient;
+      this.rnd = new Random(seed);
+    }
+
+    @Override
+    public UpdateResponse call() throws Exception {
+      UpdateResponse resp = update.process(solrClient);
+      if (rnd.nextInt(3) == 0)
+        solrClient.commit();
+      return resp;
+    }
+  }
+  
+  Object getReplicaValue(SolrClient client, int doc, String field) throws SolrServerException, IOException {
+    SolrDocument sdoc = client.getById(String.valueOf(doc), params("distrib", "false"));
+    return sdoc == null ? null : sdoc.get(field);
+  }
+
+  void assertReplicaValue(SolrClient client, int doc, String field, Object expected,
+      String message) throws SolrServerException, IOException {
+    assertEquals(message, expected, getReplicaValue(client, doc, field));
+  }
+
+  // This returns an UpdateRequest with the given fields that represent a document.
+  // This request is constructed such that it is a simulation of a request coming from
+  // a leader to a replica.
+  UpdateRequest simulatedUpdateRequest(Long prevVersion, Object... fields) throws SolrServerException, IOException {
+    SolrInputDocument doc = sdoc(fields);
+    
+    // get baseUrl of the leader
+    String baseUrl = getBaseUrl(doc.get("id").toString());
+
+    UpdateRequest ur = new UpdateRequest();
+    ur.add(doc);
+    ur.setParam("update.distrib", "FROMLEADER");
+    if (prevVersion != null) {
+      ur.setParam("distrib.inplace.prevversion", String.valueOf(prevVersion));
+      ur.setParam("distrib.inplace.update", "true");
+    }
+    ur.setParam("distrib.from", baseUrl);
+    return ur;
+  }
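+
+  // Illustrative usage (versions are hypothetical): simulate the leader forwarding an
+  // in-place update that depends on version 101 already being present on the replica:
+  //   UpdateRequest inplace = simulatedUpdateRequest(101L,
+  //       "id", 1, "inplace_updatable_float", 13.0f, "_version_", 102L);
+  //   inplace.process(NONLEADERS.get(0));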
+
+  UpdateRequest simulatedDeleteRequest(int id, long version) throws SolrServerException, IOException {
+    String baseUrl = getBaseUrl(""+id);
+
+    UpdateRequest ur = new UpdateRequest();
+    if (random().nextBoolean()) {
+      ur.deleteById(""+id);
+    } else {
+      ur.deleteByQuery("id:"+id);
+    }
+    ur.setParam("_version_", ""+version);
+    ur.setParam("update.distrib", "FROMLEADER");
+    ur.setParam("distrib.from", baseUrl);
+    return ur;
+  }
+
+  UpdateRequest simulatedDeleteRequest(String query, long version) throws SolrServerException, IOException {
+    String baseUrl = getBaseUrl((HttpSolrClient)LEADER);
+
+    UpdateRequest ur = new UpdateRequest();
+    ur.deleteByQuery(query);
+    ur.setParam("_version_", ""+version);
+    ur.setParam("update.distrib", "FROMLEADER");
+    ur.setParam("distrib.from", baseUrl + DEFAULT_COLLECTION + "/");
+    return ur;
+  }
+
+  private String getBaseUrl(String id) {
+    DocCollection collection = cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
+    Slice slice = collection.getRouter().getTargetSlice(id, null, null, null, collection);
+    return slice.getLeader().getCoreUrl();
+  }
+
+  UpdateRequest regularUpdateRequest(Object... fields) throws SolrServerException, IOException {
+    UpdateRequest ur = new UpdateRequest();
+    SolrInputDocument doc = sdoc(fields);
+    ur.add(doc);
+    return ur;
+  }
+
+  UpdateRequest regularDeleteRequest(int id) throws SolrServerException, IOException {
+    UpdateRequest ur = new UpdateRequest();
+    ur.deleteById(""+id);
+    return ur;
+  }
+
+  UpdateRequest regularDeleteByQueryRequest(String q) throws SolrServerException, IOException {
+    UpdateRequest ur = new UpdateRequest();
+    ur.deleteByQuery(q);
+    return ur;
+  }
+
+  @SuppressWarnings("rawtypes")
+  protected long addDocAndGetVersion(Object... fields) throws Exception {
+    SolrInputDocument doc = new SolrInputDocument();
+    addFields(doc, fields);
+    
+    UpdateRequest ureq = new UpdateRequest();
+    ureq.setParam("versions", "true");
+    ureq.add(doc);
+    UpdateResponse resp;
+    
+    // send updates to leader, to avoid SOLR-8733
+    resp = ureq.process(LEADER);
+    
+    long returnedVersion = Long.parseLong(((NamedList)resp.getResponse().get("adds")).getVal(0).toString());
+    assertTrue("Due to SOLR-8733, sometimes returned version is 0. Let us assert that we have successfully"
+        + " worked around that problem here.", returnedVersion > 0);
+    return returnedVersion;
+  }
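+
+  // Illustrative usage (hypothetical values):
+  //   long v = addDocAndGetVersion("id", 1, "title_s", "title1", "inplace_updatable_float", 41.0f);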
+
+  /**
+   * Convenience method variant that never uses <code>initFloat</code>
+   * @see #buildRandomIndex(Float,List)
+   */
+  protected List<Long> buildRandomIndex(Integer... specialIds) throws Exception {
+    return buildRandomIndex(null, Arrays.asList(specialIds));
+  }
+
+  /** 
+   * Helper method to build a randomized index with the fields needed for all test methods in this class.
+   * At a minimum, this index will contain 1 doc per "special" (non-negative) document id.  These special documents will be added with the specified <code>initFloat</code> value in the "inplace_updatable_float" field.
+   *
+   * A random number of documents (with negative ids) will be indexed in between each of the 
+   * "special" documents, as well as before/after the first/last special document.
+   *
+   * @param initFloat Value to use in the "inplace_updatable_float" for the special documents; will never be used if null
+   * @param specialIds The ids to use for the special documents, all values must be non-negative
+   * @return the version of each special document, as returned when indexing it
+   */
+  protected List<Long> buildRandomIndex(Float initFloat, List<Integer> specialIds) throws Exception {
+    
+    int id = -1; // used for non-special docs
+    final int numPreDocs = rarely() ? TestUtil.nextInt(random(),0,9) : atLeast(10);
+    for (int i = 1; i <= numPreDocs; i++) {
+      addDocAndGetVersion("id", id, "title_s", "title" + id, "id_i", id);
+      id--;
+    }
+    final List<Long> versions = new ArrayList<>(specialIds.size());
+    for (int special : specialIds) {
+      if (null == initFloat) {
+        versions.add(addDocAndGetVersion("id", special, "title_s", "title" + special, "id_i", special));
+      } else {
+        versions.add(addDocAndGetVersion("id", special, "title_s", "title" + special, "id_i", special,
+                                         "inplace_updatable_float", initFloat));
+      }
+      final int numPostDocs = rarely() ? TestUtil.nextInt(random(),0,9) : atLeast(10);
+      for (int i = 1; i <= numPostDocs; i++) {
+        addDocAndGetVersion("id", id, "title_s", "title" + id, "id_i", id);
+        id--;
+      }
+    }
+    LEADER.commit();
+    
+    assert specialIds.size() == versions.size();
+    return versions;
+  }
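+
+  // Illustrative usage (hypothetical values): special docs 0 and 1 initialized to 41.0f,
+  // surrounded by random filler docs; returns the indexed version of each special doc:
+  //   List<Long> versions = buildRandomIndex(41.0f, Arrays.asList(0, 1));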
+
+  /*
+   * Situation:
+   * add(id=1,inpfield=12,title=mytitle,version=1)
+   * inp(id=1,inpfield=13,prevVersion=1,version=2) // delayed so long it effectively never arrives
+   * inp(id=1,inpfield=14,prevVersion=2,version=3) // waits till timeout, then fetches a "not found" from the leader
+   * dbq("inp:14",version=4)
+   */
+  private void testDBQUsingUpdatedFieldFromDroppedUpdate() throws Exception {
+    del("*:*");
+    commit();
+    
+    float inplace_updatable_float = 1F;
+    buildRandomIndex(inplace_updatable_float, Collections.singletonList(1));
+
+    List<UpdateRequest> updates = new ArrayList<>();
+    updates.add(regularUpdateRequest("id", 1, "id_i", 1, "inplace_updatable_float", 12, "title_s", "mytitle"));
+    updates.add(regularUpdateRequest("id", 1, "inplace_updatable_float", map("inc", 1))); // delay indefinitely
+    updates.add(regularUpdateRequest("id", 1, "inplace_updatable_float", map("inc", 1)));
+    updates.add(regularDeleteByQueryRequest("inplace_updatable_float:14"));
+
+    // The second request will be delayed for a very long time, so that the next update gives up
+    // waiting for it and fetches a full update from the leader.
+    shardToJetty.get(SHARD1).get(1).jetty.getDebugFilter().addDelay(
+        "Waiting for dependent update to timeout", 2, 8000);
+
+    ExecutorService threadpool =
+        ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
+    for (UpdateRequest update : updates) {
+      AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, cloudClient,
+                                                                         random().nextLong());
+      threadpool.submit(task);
+
+      // We can't guarantee what order the updates are executed in, since multiple threads
+      // are involved, but we try to bias the thread scheduling to run them in the order submitted
+      Thread.sleep(100);
+    }
+
+    threadpool.shutdown();
+    assertTrue("Thread pool didn't terminate within 12 secs", threadpool.awaitTermination(12, TimeUnit.SECONDS));
+
+    commit();
+
+    // TODO: Could try checking ZK for LIR flags to ensure LIR has not kicked in
+    // Check every 10ms, 100 times, for a replica to go down (& assert that it doesn't)
+    for (int i=0; i<100; i++) {
+      Thread.sleep(10);
+      cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
+      ClusterState state = cloudClient.getZkStateReader().getClusterState();
+
+      int numActiveReplicas = 0;
+      for (Replica rep: state.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1).getReplicas())
+        if (rep.getState().equals(Replica.State.ACTIVE))
+          numActiveReplicas++;
+
+      assertEquals("The replica receiving reordered updates must not have gone down", 3, numActiveReplicas);
+    }
+
+    for (SolrClient client : clients) {
+      log.info("Testing client (testDBQUsingUpdatedFieldFromDroppedUpdate): " + ((HttpSolrClient)client).getBaseURL());
+      log.info("Version at " + ((HttpSolrClient)client).getBaseURL() + " is: " + getReplicaValue(client, 1, "_version_"));
+
+      assertNull(client.getById("1", params("distrib", "false")));
+    }
+
+    log.info("testDBQUsingUpdatedFieldFromDroppedUpdate: This test passed fine...");
+  }
+  
+}