You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by no...@apache.org on 2013/08/08 11:06:35 UTC

svn commit: r1511633 - in /lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud: CollectionsAPIDistributedZkTest.java CustomCollectionTest.java

Author: noble
Date: Thu Aug  8 09:06:34 2013
New Revision: 1511633

URL: http://svn.apache.org/r1511633
Log:
SOLR-5113

Added:
    lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
Modified:
    lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java?rev=1511633&r1=1511632&r2=1511633&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java Thu Aug  8 09:06:34 2013
@@ -22,14 +22,12 @@ import org.apache.lucene.util.LuceneTest
 import org.apache.lucene.util._TestUtil;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.CloudSolrServer;
 import org.apache.solr.client.solrj.impl.HttpSolrServer;
 import org.apache.solr.client.solrj.request.CoreAdminRequest;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
 import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.CoreAdminResponse;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrException;
@@ -37,8 +35,6 @@ import org.apache.solr.common.SolrExcept
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
@@ -57,7 +53,6 @@ import org.apache.solr.update.SolrCmdDis
 import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 
 import javax.management.MBeanServer;
 import javax.management.MBeanServerFactory;
@@ -82,17 +77,12 @@ import java.util.concurrent.SynchronousQ
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import static org.apache.solr.cloud.OverseerCollectionProcessor.MAX_SHARDS_PER_NODE;
 import static org.apache.solr.cloud.OverseerCollectionProcessor.REPLICATION_FACTOR;
-import static org.apache.solr.cloud.OverseerCollectionProcessor.ROUTER;
-import static org.apache.solr.cloud.OverseerCollectionProcessor.SHARDS_PROP;
-import static org.apache.solr.common.params.ShardParams._ROUTE_;
 
 /**
  * Tests the Cloud Collections API.
  */
 @Slow
-@Ignore("SOLR-5113")
 public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase {
   
   private static final String DEFAULT_COLLECTION = "collection1";
@@ -158,7 +148,6 @@ public class CollectionsAPIDistributedZk
     testErrorHandling();
     deletePartiallyCreatedCollection();
     deleteCollectionWithDownNodes();
-    testCustomCollectionsAPI();
     if (DEBUG) {
       super.printLayout();
     }
@@ -629,208 +618,6 @@ public class CollectionsAPIDistributedZk
     
     checkNoTwoShardsUseTheSameIndexDir();
   }
-  private void testCustomCollectionsAPI() throws Exception {
-    String COLL_PREFIX = "new_implicit_coll_";
-
-    // TODO: fragile - because we dont pass collection.confName, it will only
-    // find a default if a conf set with a name matching the collection name is found, or
-    // if there is only one conf set. That and the fact that other tests run first in this
-    // env make this pretty fragile
-
-    // create new collections rapid fire
-    Map<String,List<Integer>> collectionInfos = new HashMap<String,List<Integer>>();
-    int cnt = random().nextInt(6) + 1;
-
-    for (int i = 0; i < cnt; i++) {
-      int numShards = 4;
-      int replicationFactor = _TestUtil.nextInt(random(), 0, 3) + 2;
-      int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrServer()
-          .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
-
-      CloudSolrServer client = null;
-      try {
-        if (i == 0) {
-          // Test if we can create a collection through CloudSolrServer where
-          // you havnt set default-collection
-          // This is nice because you want to be able to create you first
-          // collection using CloudSolrServer, and in such case there is
-          // nothing reasonable to set as default-collection
-          client = createCloudClient(null);
-        } else if (i == 1) {
-          // Test if we can create a collection through CloudSolrServer where
-          // you have set default-collection to a non-existing collection
-          // This is nice because you want to be able to create you first
-          // collection using CloudSolrServer, and in such case there is
-          // nothing reasonable to set as default-collection, but you might want
-          // to use the same CloudSolrServer throughout the entire
-          // lifetime of your client-application, so it is nice to be able to
-          // set a default-collection on this CloudSolrServer once and for all
-          // and use this CloudSolrServer to create the collection
-          client = createCloudClient(COLL_PREFIX + i);
-        }
-
-        Map<String, Object> props = OverseerCollectionProcessor.asMap(
-            ROUTER, ImplicitDocRouter.NAME,
-            REPLICATION_FACTOR, replicationFactor,
-            MAX_SHARDS_PER_NODE, maxShardsPerNode,
-            SHARDS_PROP,"a,b,c,d");
-
-        createCollection(collectionInfos, COLL_PREFIX + i,props,client);
-      } finally {
-        if (client != null) client.shutdown();
-      }
-    }
-
-    Set<Entry<String,List<Integer>>> collectionInfosEntrySet = collectionInfos.entrySet();
-    for (Entry<String,List<Integer>> entry : collectionInfosEntrySet) {
-      String collection = entry.getKey();
-      List<Integer> list = entry.getValue();
-      checkForCollection(collection, list, null);
-
-      String url = getUrlFromZk(collection);
-
-      HttpSolrServer collectionClient = new HttpSolrServer(url);
-
-      // poll for a second - it can take a moment before we are ready to serve
-      waitForNon403or404or503(collectionClient);
-    }
-    ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
-    for (int j = 0; j < cnt; j++) {
-      waitForRecoveriesToFinish(COLL_PREFIX + j, zkStateReader, false);
-    }
-
-    ClusterState clusterState = zkStateReader.getClusterState();
-
-    DocCollection coll = clusterState.getCollection(COLL_PREFIX + 0);
-    assertEquals("implicit", coll.getStr(ROUTER));
-    assertNotNull(coll.getStr(REPLICATION_FACTOR));
-    assertNotNull(coll.getStr(MAX_SHARDS_PER_NODE));
-
-    List<String> collectionNameList = new ArrayList<String>();
-    collectionNameList.addAll(collectionInfos.keySet());
-    log.info("Collections created : "+collectionNameList );
-
-    String collectionName = collectionNameList.get(random().nextInt(collectionNameList.size()));
-
-    String url = getUrlFromZk(collectionName);
-
-    HttpSolrServer collectionClient = new HttpSolrServer(url);
-
-
-    // lets try and use the solrj client to index a couple documents
-
-    collectionClient.add(getDoc(id, 6, i1, -600, tlong, 600, t1,
-        "humpty dumpy sat on a wall", _ROUTE_,"a"));
-
-    collectionClient.add(getDoc(id, 7, i1, -600, tlong, 600, t1,
-        "humpty dumpy3 sat on a walls", _ROUTE_,"a"));
-
-    collectionClient.add(getDoc(id, 8, i1, -600, tlong, 600, t1,
-        "humpty dumpy2 sat on a walled", _ROUTE_,"a"));
-
-    collectionClient.commit();
-
-    assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(0, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys","b")).getResults().getNumFound());
-    assertEquals(3, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys","a")).getResults().getNumFound());
-
-    collectionClient.deleteByQuery("*:*");
-    collectionClient.commit(true,true);
-    assertEquals(0, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
-
-    UpdateRequest up = new UpdateRequest();
-    up.setParam(_ROUTE_, "c");
-    up.setParam("commit","true");
-
-    up.add(getDoc(id, 9, i1, -600, tlong, 600, t1,
-        "humpty dumpy sat on a wall"));
-    up.add(getDoc(id, 10, i1, -600, tlong, 600, t1,
-        "humpty dumpy3 sat on a walls"));
-    up.add(getDoc(id, 11, i1, -600, tlong, 600, t1,
-        "humpty dumpy2 sat on a walled"));
-
-    collectionClient.request(up);
-
-    assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(0, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys","a")).getResults().getNumFound());
-    assertEquals(3, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys","c")).getResults().getNumFound());
-
-    //Testing CREATESHARD
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATESHARD.toString());
-    params.set("collection", coll.getName());
-    params.set("shard", "z");
-    SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0))).request(request);
-    waitForCollection(zkStateReader,coll.getName(),5);
-    collectionClient.add(getDoc(id, 66, i1, -600, tlong, 600, t1,
-        "humpty dumpy sat on a wall", _ROUTE_,"x"));
-    collectionClient.commit();
-    assertEquals(1, collectionClient.query(new SolrQuery("*:*").setParam(_ROUTE_,"x")).getResults().getNumFound());
-
-
-    int numShards = 4;
-    int replicationFactor = _TestUtil.nextInt(random(), 0, 3) + 2;
-    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
-        .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
-
-    CloudSolrServer client = null;
-    String shard_fld = "shard_s";
-    try {
-      client = createCloudClient(null);
-      Map<String, Object> props = OverseerCollectionProcessor.asMap(
-          ROUTER, ImplicitDocRouter.NAME,
-          REPLICATION_FACTOR, replicationFactor,
-          MAX_SHARDS_PER_NODE, maxShardsPerNode,
-          SHARDS_PROP,"a,b,c,d",
-          DocRouter.ROUTE_FIELD, shard_fld);
-
-      collectionName = COLL_PREFIX + "withShardField";
-      createCollection(collectionInfos, collectionName,props,client);
-    } finally {
-      if (client != null) client.shutdown();
-    }
-
-    List<Integer> list = collectionInfos.get(collectionName);
-    checkForCollection(collectionName, list, null);
-
-
-    url = getUrlFromZk(collectionName);
-
-    collectionClient = new HttpSolrServer(url);
-
-    // poll for a second - it can take a moment before we are ready to serve
-    waitForNon403or404or503(collectionClient);
-
-
-
-
-    collectionClient = new HttpSolrServer(url);
-
-
-    // lets try and use the solrj client to index a couple documents
-
-    collectionClient.add(getDoc(id, 6, i1, -600, tlong, 600, t1,
-        "humpty dumpy sat on a wall", shard_fld,"a"));
-
-    collectionClient.add(getDoc(id, 7, i1, -600, tlong, 600, t1,
-        "humpty dumpy3 sat on a walls", shard_fld,"a"));
-
-    collectionClient.add(getDoc(id, 8, i1, -600, tlong, 600, t1,
-        "humpty dumpy2 sat on a walled", shard_fld,"a"));
-
-    collectionClient.commit();
-
-    assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(0, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys","b")).getResults().getNumFound());
-    //TODO debug the following case
-    assertEquals(3, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys", "a")).getResults().getNumFound());
-
-
-  }
 
   private boolean waitForReloads(String collectionName, Map<String,Long> urlToTimeBefore) throws SolrServerException, IOException {
     

Added: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java?rev=1511633&view=auto
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java (added)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java Thu Aug  8 09:06:34 2013
@@ -0,0 +1,435 @@
+package org.apache.solr.cloud;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.Constants;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util._TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrServer;
+import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.update.DirectUpdateHandler2;
+import org.apache.solr.update.SolrCmdDistributor.Request;
+import org.apache.solr.util.DefaultSolrThreadFactory;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.Future;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.solr.cloud.OverseerCollectionProcessor.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.cloud.OverseerCollectionProcessor.REPLICATION_FACTOR;
+import static org.apache.solr.cloud.OverseerCollectionProcessor.ROUTER;
+import static org.apache.solr.cloud.OverseerCollectionProcessor.SHARDS_PROP;
+import static org.apache.solr.common.params.ShardParams._ROUTE_;
+
+/**
+ * Tests the Custom Sharding API.
+ */
+@Slow
+public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
+
+  private static final String DEFAULT_COLLECTION = "collection1";
+  private static final boolean DEBUG = false;
+
+  ThreadPoolExecutor executor = new ThreadPoolExecutor(0,
+      Integer.MAX_VALUE, 5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
+      new DefaultSolrThreadFactory("testExecutor"));
+
+  CompletionService<Request> completionService;
+  Set<Future<Request>> pending;
+
+  @BeforeClass
+  public static void beforeThisClass2() throws Exception {
+    assumeFalse("FIXME: This test fails under Java 8 all the time, see SOLR-4711", Constants.JRE_IS_MINIMUM_JAVA8);
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    System.setProperty("numShards", Integer.toString(sliceCount));
+    System.setProperty("solr.xml.persist", "true");
+  }
+
+  protected String getSolrXml() {
+    return "solr-no-core.xml";
+  }
+
+
+  public CustomCollectionTest() {
+    fixShardCount = true;
+
+    sliceCount = 2;
+    shardCount = 4;
+    completionService = new ExecutorCompletionService<Request>(executor);
+    pending = new HashSet<Future<Request>>();
+    checkCreatedVsState = false;
+
+  }
+
+  @Override
+  protected void setDistributedParams(ModifiableSolrParams params) {
+
+    if (r.nextBoolean()) {
+      // don't set shards, let that be figured out from the cloud state
+    } else {
+      // use shard ids rather than physical locations
+      StringBuilder sb = new StringBuilder();
+      for (int i = 0; i < shardCount; i++) {
+        if (i > 0)
+          sb.append(',');
+        sb.append("shard" + (i + 3));
+      }
+      params.set("shards", sb.toString());
+    }
+  }
+
+  @Override
+  public void doTest() throws Exception {
+    testCustomCollectionsAPI();
+    if (DEBUG) {
+      super.printLayout();
+    }
+  }
+
+
+  private void testCustomCollectionsAPI() throws Exception {
+    String COLL_PREFIX = "implicitcoll";
+
+    // TODO: fragile - because we don't pass collection.confName, it will only
+    // find a default if a conf set with a name matching the collection name is found, or
+    // if there is only one conf set. That and the fact that other tests run first in this
+    // env make this pretty fragile
+
+    // create new collections rapid fire
+    Map<String,List<Integer>> collectionInfos = new HashMap<String,List<Integer>>();
+    int replicationFactor = _TestUtil.nextInt(random(), 0, 3) + 2;
+
+    int cnt = random().nextInt(6) + 1;
+
+    for (int i = 0; i < cnt; i++) {
+      int numShards = 3;
+      int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrServer()
+          .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
+
+
+      CloudSolrServer client = null;
+      try {
+        if (i == 0) {
+          // Test if we can create a collection through CloudSolrServer where
+          // you haven't set default-collection
+          // This is nice because you want to be able to create your first
+          // collection using CloudSolrServer, and in such a case there is
+          // nothing reasonable to set as default-collection
+          client = createCloudClient(null);
+        } else if (i == 1) {
+          // Test if we can create a collection through CloudSolrServer where
+          // you have set default-collection to a non-existing collection
+          // This is nice because you want to be able to create your first
+          // collection using CloudSolrServer, and in such a case there is
+          // nothing reasonable to set as default-collection, but you might want
+          // to use the same CloudSolrServer throughout the entire
+          // lifetime of your client-application, so it is nice to be able to
+          // set a default-collection on this CloudSolrServer once and for all
+          // and use this CloudSolrServer to create the collection
+          client = createCloudClient(COLL_PREFIX + i);
+        }
+
+        Map<String, Object> props = OverseerCollectionProcessor.asMap(
+            ROUTER, ImplicitDocRouter.NAME,
+            REPLICATION_FACTOR, replicationFactor,
+            MAX_SHARDS_PER_NODE, maxShardsPerNode,
+            SHARDS_PROP,"a,b,c");
+
+        createCollection(collectionInfos, COLL_PREFIX + i,props,client);
+      } finally {
+        if (client != null) client.shutdown();
+      }
+    }
+
+    Set<Entry<String,List<Integer>>> collectionInfosEntrySet = collectionInfos.entrySet();
+    for (Entry<String,List<Integer>> entry : collectionInfosEntrySet) {
+      String collection = entry.getKey();
+      List<Integer> list = entry.getValue();
+      checkForCollection(collection, list, null);
+
+      String url = getUrlFromZk(collection);
+
+      HttpSolrServer collectionClient = new HttpSolrServer(url);
+
+      // poll for a second - it can take a moment before we are ready to serve
+      waitForNon403or404or503(collectionClient);
+    }
+    ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
+    for (int j = 0; j < cnt; j++) {
+      waitForRecoveriesToFinish(COLL_PREFIX + j, zkStateReader, false);
+    }
+
+    ClusterState clusterState = zkStateReader.getClusterState();
+
+    DocCollection coll = clusterState.getCollection(COLL_PREFIX + 0);
+    assertEquals("implicit", coll.getStr(ROUTER));
+    assertNotNull(coll.getStr(REPLICATION_FACTOR));
+    assertNotNull(coll.getStr(MAX_SHARDS_PER_NODE));
+
+    List<String> collectionNameList = new ArrayList<String>();
+    collectionNameList.addAll(collectionInfos.keySet());
+    log.info("Collections created : "+collectionNameList );
+
+    String collectionName = collectionNameList.get(random().nextInt(collectionNameList.size()));
+
+    String url = getUrlFromZk(collectionName);
+
+    HttpSolrServer collectionClient = new HttpSolrServer(url);
+
+
+    // lets try and use the solrj client to index a couple documents
+
+    collectionClient.add(getDoc(id, 6, i1, -600, tlong, 600, t1,
+        "humpty dumpy sat on a wall", _ROUTE_,"a"));
+
+    collectionClient.add(getDoc(id, 7, i1, -600, tlong, 600, t1,
+        "humpty dumpy3 sat on a walls", _ROUTE_,"a"));
+
+    collectionClient.add(getDoc(id, 8, i1, -600, tlong, 600, t1,
+        "humpty dumpy2 sat on a walled", _ROUTE_,"a"));
+
+    collectionClient.commit();
+
+    assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(0, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys","b")).getResults().getNumFound());
+    assertEquals(3, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys","a")).getResults().getNumFound());
+
+    collectionClient.deleteByQuery("*:*");
+    collectionClient.commit(true,true);
+    assertEquals(0, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
+
+    UpdateRequest up = new UpdateRequest();
+    up.setParam(_ROUTE_, "c");
+    up.setParam("commit","true");
+
+    up.add(getDoc(id, 9, i1, -600, tlong, 600, t1,
+        "humpty dumpy sat on a wall"));
+    up.add(getDoc(id, 10, i1, -600, tlong, 600, t1,
+        "humpty dumpy3 sat on a walls"));
+    up.add(getDoc(id, 11, i1, -600, tlong, 600, t1,
+        "humpty dumpy2 sat on a walled"));
+
+    collectionClient.request(up);
+
+    assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(0, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys","a")).getResults().getNumFound());
+    assertEquals(3, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys","c")).getResults().getNumFound());
+
+    //Testing CREATESHARD
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATESHARD.toString());
+    params.set("collection", collectionName);
+    params.set("shard", "x");
+    SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+    createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0))).request(request);
+    waitForCollection(zkStateReader,collectionName,4);
+    //wait for all the replicas to become active
+    int attempts = 0;
+    while(true){
+      if(attempts>30 ) fail("Not enough active replicas in the shard 'x'");
+      attempts++;
+      int activeReplicaCount = 0;
+      for (Replica x : zkStateReader.getClusterState().getCollection(collectionName).getSlice("x").getReplicas()) {
+        if("active".equals(x.getStr("state"))) activeReplicaCount++;
+      }
+      Thread.sleep(500);
+      if(activeReplicaCount >= replicationFactor) break;
+    }
+    log.info(zkStateReader.getClusterState().toString());
+
+    collectionClient.add(getDoc(id, 66, i1, -600, tlong, 600, t1,
+        "humpty dumpy sat on a wall", _ROUTE_,"x"));
+    collectionClient.commit();
+    assertEquals(1, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys","x")).getResults().getNumFound());
+
+
+    int numShards = 4;
+    replicationFactor = _TestUtil.nextInt(random(), 0, 3) + 2;
+    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
+        .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
+
+
+    CloudSolrServer client = null;
+    String shard_fld = "shard_s";
+    try {
+      client = createCloudClient(null);
+      Map<String, Object> props = OverseerCollectionProcessor.asMap(
+          ROUTER, ImplicitDocRouter.NAME,
+          REPLICATION_FACTOR, replicationFactor,
+          MAX_SHARDS_PER_NODE, maxShardsPerNode,
+          SHARDS_PROP,"a,b,c,d",
+          DocRouter.ROUTE_FIELD, shard_fld);
+
+      collectionName = COLL_PREFIX + "withShardField";
+      createCollection(collectionInfos, collectionName,props,client);
+    } finally {
+      if (client != null) client.shutdown();
+    }
+
+    List<Integer> list = collectionInfos.get(collectionName);
+    checkForCollection(collectionName, list, null);
+
+
+    url = getUrlFromZk(collectionName);
+
+    collectionClient = new HttpSolrServer(url);
+
+    // poll for a second - it can take a moment before we are ready to serve
+    waitForNon403or404or503(collectionClient);
+
+
+
+
+    collectionClient = new HttpSolrServer(url);
+
+
+    // lets try and use the solrj client to index a couple documents
+
+    collectionClient.add(getDoc(id, 6, i1, -600, tlong, 600, t1,
+        "humpty dumpy sat on a wall", shard_fld,"a"));
+
+    collectionClient.add(getDoc(id, 7, i1, -600, tlong, 600, t1,
+        "humpty dumpy3 sat on a walls", shard_fld,"a"));
+
+    collectionClient.add(getDoc(id, 8, i1, -600, tlong, 600, t1,
+        "humpty dumpy2 sat on a walled", shard_fld,"a"));
+
+    collectionClient.commit();
+
+    assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(0, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys","b")).getResults().getNumFound());
+    //TODO debug the following case
+    assertEquals(3, collectionClient.query(new SolrQuery("*:*").setParam("shard.keys", "a")).getResults().getNumFound());
+
+
+  }
+
+
+
+
+  private String getUrlFromZk(String collection) {
+    ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader().getClusterState();
+    Map<String,Slice> slices = clusterState.getCollectionStates().get(collection).getSlicesMap();
+
+    if (slices == null) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection:" + collection);
+    }
+
+    for (Map.Entry<String,Slice> entry : slices.entrySet()) {
+      Slice slice = entry.getValue();
+      Map<String,Replica> shards = slice.getReplicasMap();
+      Set<Map.Entry<String,Replica>> shardEntries = shards.entrySet();
+      for (Map.Entry<String,Replica> shardEntry : shardEntries) {
+        final ZkNodeProps node = shardEntry.getValue();
+        if (clusterState.liveNodesContain(node.getStr(ZkStateReader.NODE_NAME_PROP))) {
+          return ZkCoreNodeProps.getCoreUrl(node.getStr(ZkStateReader.BASE_URL_PROP), collection); //new ZkCoreNodeProps(node).getCoreUrl();
+        }
+      }
+    }
+
+    throw new RuntimeException("Could not find a live node for collection:" + collection);
+  }
+
+  private void waitForNon403or404or503(HttpSolrServer collectionClient)
+      throws Exception {
+    SolrException exp = null;
+    long timeoutAt = System.currentTimeMillis() + 30000;
+
+    while (System.currentTimeMillis() < timeoutAt) {
+      boolean missing = false;
+
+      try {
+        collectionClient.query(new SolrQuery("*:*"));
+      } catch (SolrException e) {
+        if (!(e.code() == 403 || e.code() == 503 || e.code() == 404)) {
+          throw e;
+        }
+        exp = e;
+        missing = true;
+      }
+      if (!missing) {
+        return;
+      }
+      Thread.sleep(50);
+    }
+
+    fail("Could not find the new collection - " + exp.code() + " : " + collectionClient.getBaseURL());
+  }
+
+
+  @Override
+  protected QueryResponse queryServer(ModifiableSolrParams params) throws SolrServerException {
+
+    if (r.nextBoolean())
+      return super.queryServer(params);
+
+    if (r.nextBoolean())
+      params.set("collection",DEFAULT_COLLECTION);
+
+    QueryResponse rsp = getCommonCloudSolrServer().query(params);
+    return rsp;
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+    System.clearProperty("numShards");
+    System.clearProperty("zkHost");
+    System.clearProperty("solr.xml.persist");
+
+    // insurance
+    DirectUpdateHandler2.commitOnClose = true;
+  }
+}