Posted to commits@lucene.apache.org by da...@apache.org on 2017/07/25 09:06:22 UTC

[37/50] [abbrv] lucene-solr:feature/autoscaling: SOLR-10916: Convert tests that extend LuceneTestCase and use MiniSolrCloudCluster to instead extend SolrCloudTestCase

SOLR-10916: Convert tests that extend LuceneTestCase and use MiniSolrCloudCluster to instead extend SolrCloudTestCase
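
For reference, a minimal sketch of the target pattern this conversion moves toward, assuming the usual SolrCloudTestCase helpers (configureCluster, configset, and the shared static cluster field); the class, collection, and configset names below are illustrative and not taken from this commit:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.cloud.SolrCloudTestCase;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class MyCloudTest extends SolrCloudTestCase {

      @BeforeClass
      public static void setupCluster() throws Exception {
        // Build the shared MiniSolrCloudCluster once for the class;
        // the base class is expected to shut it down afterwards.
        configureCluster(2)                                 // two Jetty nodes
            .addConfig("conf", configset("cloud-minimal"))  // upload a test configset
            .configure();
      }

      @Test
      public void testCreateAndQuery() throws Exception {
        CloudSolrClient client = cluster.getSolrClient();
        CollectionAdminRequest.createCollection("mycollection", "conf", 1, 1)
            .process(client);
        // ... index documents and query via client ...
      }
    }

This keeps cluster lifecycle management in the base class instead of in each test, which is what the per-file changes below implement.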


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/7328e592
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/7328e592
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/7328e592

Branch: refs/heads/feature/autoscaling
Commit: 7328e592511bead7d9a9ffdbb8e004b2ff44a1f1
Parents: cdae11e
Author: Steve Rowe <sa...@apache.org>
Authored: Fri Jul 21 18:28:12 2017 -0400
Committer: Steve Rowe <sa...@apache.org>
Committed: Fri Jul 21 18:28:12 2017 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   3 +
 .../cloud/SegmentTerminateEarlyTestState.java   |  82 ++--
 .../solr/cloud/TestAuthenticationFramework.java | 188 +++------
 .../TestCollectionsAPIViaSolrCloudCluster.java  | 298 ++++++++++++++
 .../solr/cloud/TestMiniSolrCloudCluster.java    | 388 -------------------
 .../cloud/TestMiniSolrCloudClusterKerberos.java | 141 -------
 .../cloud/TestSolrCloudWithKerberosAlt.java     | 150 +++----
 .../java/org/apache/solr/SolrTestCaseJ4.java    |  12 +-
 8 files changed, 443 insertions(+), 819 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7328e592/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 07faf9f..c6c4050 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -106,6 +106,9 @@ Other Changes
 
 * SOLR-10338: Configure SecureRandom non blocking for tests. (Mihaly Toth, hossman, Ishan Chattopadhyaya, via Mark Miller)
 
+* SOLR-10916: Convert tests that extend LuceneTestCase and use MiniSolrCloudCluster 
+  to instead extend SolrCloudTestCase. (Steve Rowe)
+
 ==================  7.0.0 ==================
 
 Versions of Major Components

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7328e592/solr/core/src/test/org/apache/solr/cloud/SegmentTerminateEarlyTestState.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/SegmentTerminateEarlyTestState.java b/solr/core/src/test/org/apache/solr/cloud/SegmentTerminateEarlyTestState.java
index 118d5ed..5e0bccd 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SegmentTerminateEarlyTestState.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SegmentTerminateEarlyTestState.java
@@ -92,8 +92,8 @@ class SegmentTerminateEarlyTestState {
   }
 
   void queryTimestampDescending(CloudSolrClient cloudSolrClient) throws Exception {
-    TestMiniSolrCloudCluster.assertFalse(maxTimestampDocKeys.isEmpty());
-    TestMiniSolrCloudCluster.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
+    TestSegmentSorting.assertFalse(maxTimestampDocKeys.isEmpty());
+    TestSegmentSorting.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
     final Long oddFieldValue = new Long(maxTimestampDocKeys.iterator().next().intValue()%2);
     final SolrQuery query = new SolrQuery(ODD_FIELD +":"+oddFieldValue);
     query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.desc);
@@ -102,24 +102,24 @@ class SegmentTerminateEarlyTestState {
     // CommonParams.SEGMENT_TERMINATE_EARLY parameter intentionally absent
     final QueryResponse rsp = cloudSolrClient.query(query);
     // check correctness of the results count
-    TestMiniSolrCloudCluster.assertEquals("numFound", numDocs/2, rsp.getResults().getNumFound());
+    TestSegmentSorting.assertEquals("numFound", numDocs/2, rsp.getResults().getNumFound());
     // check correctness of the first result
     if (rsp.getResults().getNumFound() > 0) {
       final SolrDocument solrDocument0 = rsp.getResults().get(0);
       final Integer idAsInt = Integer.parseInt(solrDocument0.getFieldValue(KEY_FIELD).toString());
-      TestMiniSolrCloudCluster.assertTrue
+      TestSegmentSorting.assertTrue
         (KEY_FIELD +"="+idAsInt+" of ("+solrDocument0+") is not in maxTimestampDocKeys("+maxTimestampDocKeys+")",
          maxTimestampDocKeys.contains(idAsInt));
-      TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, solrDocument0.getFieldValue(ODD_FIELD));
+      TestSegmentSorting.assertEquals(ODD_FIELD, oddFieldValue, solrDocument0.getFieldValue(ODD_FIELD));
     }
     // check segmentTerminatedEarly flag
-    TestMiniSolrCloudCluster.assertNull("responseHeader.segmentTerminatedEarly present in "+rsp.getResponseHeader(),
+    TestSegmentSorting.assertNull("responseHeader.segmentTerminatedEarly present in "+rsp.getResponseHeader(),
         rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY));
   }
 
   void queryTimestampDescendingSegmentTerminateEarlyYes(CloudSolrClient cloudSolrClient) throws Exception {
-    TestMiniSolrCloudCluster.assertFalse(maxTimestampDocKeys.isEmpty());
-    TestMiniSolrCloudCluster.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
+    TestSegmentSorting.assertFalse(maxTimestampDocKeys.isEmpty());
+    TestSegmentSorting.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
     final Long oddFieldValue = new Long(maxTimestampDocKeys.iterator().next().intValue()%2);
     final SolrQuery query = new SolrQuery(ODD_FIELD +":"+oddFieldValue);
     query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.desc);
@@ -133,28 +133,28 @@ class SegmentTerminateEarlyTestState {
     query.set(CommonParams.SEGMENT_TERMINATE_EARLY, true);
     final QueryResponse rsp = cloudSolrClient.query(query);
     // check correctness of the results count
-    TestMiniSolrCloudCluster.assertTrue("numFound", rowsWanted <= rsp.getResults().getNumFound());
-    TestMiniSolrCloudCluster.assertTrue("numFound", rsp.getResults().getNumFound() <= numDocs/2);
+    TestSegmentSorting.assertTrue("numFound", rowsWanted <= rsp.getResults().getNumFound());
+    TestSegmentSorting.assertTrue("numFound", rsp.getResults().getNumFound() <= numDocs/2);
     // check correctness of the first result
     if (rsp.getResults().getNumFound() > 0) {
       final SolrDocument solrDocument0 = rsp.getResults().get(0);
       final Integer idAsInt = Integer.parseInt(solrDocument0.getFieldValue(KEY_FIELD).toString());
-      TestMiniSolrCloudCluster.assertTrue
+      TestSegmentSorting.assertTrue
         (KEY_FIELD +"="+idAsInt+" of ("+solrDocument0+") is not in maxTimestampDocKeys("+maxTimestampDocKeys+")",
          maxTimestampDocKeys.contains(idAsInt));
-      TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, rsp.getResults().get(0).getFieldValue(ODD_FIELD));
+      TestSegmentSorting.assertEquals(ODD_FIELD, oddFieldValue, rsp.getResults().get(0).getFieldValue(ODD_FIELD));
     }
     // check segmentTerminatedEarly flag
-    TestMiniSolrCloudCluster.assertNotNull("responseHeader.segmentTerminatedEarly missing in "+rsp.getResponseHeader(),
+    TestSegmentSorting.assertNotNull("responseHeader.segmentTerminatedEarly missing in "+rsp.getResponseHeader(),
         rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY));
-    TestMiniSolrCloudCluster.assertTrue("responseHeader.segmentTerminatedEarly missing/false in "+rsp.getResponseHeader(),
+    TestSegmentSorting.assertTrue("responseHeader.segmentTerminatedEarly missing/false in "+rsp.getResponseHeader(),
         Boolean.TRUE.equals(rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY)));
     // check shards info
     final Object shardsInfo = rsp.getResponse().get(ShardParams.SHARDS_INFO);
     if (!Boolean.TRUE.equals(shardsInfoWanted)) {
-      TestMiniSolrCloudCluster.assertNull(ShardParams.SHARDS_INFO, shardsInfo);
+      TestSegmentSorting.assertNull(ShardParams.SHARDS_INFO, shardsInfo);
     } else {
-      TestMiniSolrCloudCluster.assertNotNull(ShardParams.SHARDS_INFO, shardsInfo);
+      TestSegmentSorting.assertNotNull(ShardParams.SHARDS_INFO, shardsInfo);
       int segmentTerminatedEarlyShardsCount = 0;
       for (Map.Entry<String, ?> si : (SimpleOrderedMap<?>)shardsInfo) {
         if (Boolean.TRUE.equals(((SimpleOrderedMap)si.getValue()).get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY))) {
@@ -162,14 +162,14 @@ class SegmentTerminateEarlyTestState {
         }
       }
       // check segmentTerminatedEarly flag within shards info
-      TestMiniSolrCloudCluster.assertTrue(segmentTerminatedEarlyShardsCount+" shards reported "+SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY,
+      TestSegmentSorting.assertTrue(segmentTerminatedEarlyShardsCount+" shards reported "+SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY,
           (0<segmentTerminatedEarlyShardsCount));
     }
   }
 
   void queryTimestampDescendingSegmentTerminateEarlyNo(CloudSolrClient cloudSolrClient) throws Exception {
-    TestMiniSolrCloudCluster.assertFalse(maxTimestampDocKeys.isEmpty());
-    TestMiniSolrCloudCluster.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
+    TestSegmentSorting.assertFalse(maxTimestampDocKeys.isEmpty());
+    TestSegmentSorting.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
     final Long oddFieldValue = new Long(maxTimestampDocKeys.iterator().next().intValue()%2);
     final SolrQuery query = new SolrQuery(ODD_FIELD +":"+oddFieldValue);
     query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.desc);
@@ -182,71 +182,71 @@ class SegmentTerminateEarlyTestState {
     query.set(CommonParams.SEGMENT_TERMINATE_EARLY, false);
     final QueryResponse rsp = cloudSolrClient.query(query);
     // check correctness of the results count
-    TestMiniSolrCloudCluster.assertEquals("numFound", numDocs/2, rsp.getResults().getNumFound());
+    TestSegmentSorting.assertEquals("numFound", numDocs/2, rsp.getResults().getNumFound());
     // check correctness of the first result
     if (rsp.getResults().getNumFound() > 0) {
       final SolrDocument solrDocument0 = rsp.getResults().get(0);
       final Integer idAsInt = Integer.parseInt(solrDocument0.getFieldValue(KEY_FIELD).toString());
-      TestMiniSolrCloudCluster.assertTrue
+      TestSegmentSorting.assertTrue
         (KEY_FIELD +"="+idAsInt+" of ("+solrDocument0+") is not in maxTimestampDocKeys("+maxTimestampDocKeys+")",
          maxTimestampDocKeys.contains(idAsInt));
-      TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, rsp.getResults().get(0).getFieldValue(ODD_FIELD));
+      TestSegmentSorting.assertEquals(ODD_FIELD, oddFieldValue, rsp.getResults().get(0).getFieldValue(ODD_FIELD));
     }
     // check segmentTerminatedEarly flag
-    TestMiniSolrCloudCluster.assertNull("responseHeader.segmentTerminatedEarly present in "+rsp.getResponseHeader(),
+    TestSegmentSorting.assertNull("responseHeader.segmentTerminatedEarly present in "+rsp.getResponseHeader(),
         rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY));
-    TestMiniSolrCloudCluster.assertFalse("responseHeader.segmentTerminatedEarly present/true in "+rsp.getResponseHeader(),
+    TestSegmentSorting.assertFalse("responseHeader.segmentTerminatedEarly present/true in "+rsp.getResponseHeader(),
         Boolean.TRUE.equals(rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY)));
     // check shards info
     final Object shardsInfo = rsp.getResponse().get(ShardParams.SHARDS_INFO);
     if (!Boolean.TRUE.equals(shardsInfoWanted)) {
-      TestMiniSolrCloudCluster.assertNull(ShardParams.SHARDS_INFO, shardsInfo);
+      TestSegmentSorting.assertNull(ShardParams.SHARDS_INFO, shardsInfo);
     } else {
-      TestMiniSolrCloudCluster.assertNotNull(ShardParams.SHARDS_INFO, shardsInfo);
+      TestSegmentSorting.assertNotNull(ShardParams.SHARDS_INFO, shardsInfo);
       int segmentTerminatedEarlyShardsCount = 0;
       for (Map.Entry<String, ?> si : (SimpleOrderedMap<?>)shardsInfo) {
         if (Boolean.TRUE.equals(((SimpleOrderedMap)si.getValue()).get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY))) {
           segmentTerminatedEarlyShardsCount += 1;
         }
       }
-      TestMiniSolrCloudCluster.assertEquals("shards reporting "+SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY,
+      TestSegmentSorting.assertEquals("shards reporting "+SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY,
           0, segmentTerminatedEarlyShardsCount);
     }
   }
 
   void queryTimestampDescendingSegmentTerminateEarlyYesGrouped(CloudSolrClient cloudSolrClient) throws Exception {
-    TestMiniSolrCloudCluster.assertFalse(maxTimestampDocKeys.isEmpty());
-    TestMiniSolrCloudCluster.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
+    TestSegmentSorting.assertFalse(maxTimestampDocKeys.isEmpty());
+    TestSegmentSorting.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
     final Long oddFieldValue = new Long(maxTimestampDocKeys.iterator().next().intValue()%2);
     final SolrQuery query = new SolrQuery(ODD_FIELD +":"+oddFieldValue);
     query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.desc);
     query.setFields(KEY_FIELD, ODD_FIELD, TIMESTAMP_FIELD);
     query.setRows(1);
     query.set(CommonParams.SEGMENT_TERMINATE_EARLY, true);
-    TestMiniSolrCloudCluster.assertTrue("numDocs="+numDocs+" is not quad-able", (numDocs%4)==0);
+    TestSegmentSorting.assertTrue("numDocs="+numDocs+" is not quad-able", (numDocs%4)==0);
     query.add("group.field", QUAD_FIELD);
     query.set("group", true);
     final QueryResponse rsp = cloudSolrClient.query(query);
     // check correctness of the results count
-    TestMiniSolrCloudCluster.assertEquals("matches", numDocs/2, rsp.getGroupResponse().getValues().get(0).getMatches());
+    TestSegmentSorting.assertEquals("matches", numDocs/2, rsp.getGroupResponse().getValues().get(0).getMatches());
     // check correctness of the first result
     if (rsp.getGroupResponse().getValues().get(0).getMatches() > 0) {
       final SolrDocument solrDocument = rsp.getGroupResponse().getValues().get(0).getValues().get(0).getResult().get(0);
       final Integer idAsInt = Integer.parseInt(solrDocument.getFieldValue(KEY_FIELD).toString());
-      TestMiniSolrCloudCluster.assertTrue
+      TestSegmentSorting.assertTrue
         (KEY_FIELD +"="+idAsInt+" of ("+solrDocument+") is not in maxTimestampDocKeys("+maxTimestampDocKeys+")",
          maxTimestampDocKeys.contains(idAsInt));
-      TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, solrDocument.getFieldValue(ODD_FIELD));
+      TestSegmentSorting.assertEquals(ODD_FIELD, oddFieldValue, solrDocument.getFieldValue(ODD_FIELD));
     }
     // check segmentTerminatedEarly flag
     // at present segmentTerminateEarly cannot be used with grouped queries
-    TestMiniSolrCloudCluster.assertFalse("responseHeader.segmentTerminatedEarly present/true in "+rsp.getResponseHeader(),
+    TestSegmentSorting.assertFalse("responseHeader.segmentTerminatedEarly present/true in "+rsp.getResponseHeader(),
         Boolean.TRUE.equals(rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY)));
   }
 
   void queryTimestampAscendingSegmentTerminateEarlyYes(CloudSolrClient cloudSolrClient) throws Exception {
-    TestMiniSolrCloudCluster.assertFalse(minTimestampDocKeys.isEmpty());
-    TestMiniSolrCloudCluster.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
+    TestSegmentSorting.assertFalse(minTimestampDocKeys.isEmpty());
+    TestSegmentSorting.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
     final Long oddFieldValue = new Long(minTimestampDocKeys.iterator().next().intValue()%2);
     final SolrQuery query = new SolrQuery(ODD_FIELD +":"+oddFieldValue);
     query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.asc); // a sort order that is _not_ compatible with the merge sort order
@@ -255,21 +255,21 @@ class SegmentTerminateEarlyTestState {
     query.set(CommonParams.SEGMENT_TERMINATE_EARLY, true);
     final QueryResponse rsp = cloudSolrClient.query(query);
     // check correctness of the results count
-    TestMiniSolrCloudCluster.assertEquals("numFound", numDocs/2, rsp.getResults().getNumFound());
+    TestSegmentSorting.assertEquals("numFound", numDocs/2, rsp.getResults().getNumFound());
     // check correctness of the first result
     if (rsp.getResults().getNumFound() > 0) {
       final SolrDocument solrDocument0 = rsp.getResults().get(0);
       final Integer idAsInt = Integer.parseInt(solrDocument0.getFieldValue(KEY_FIELD).toString());
-      TestMiniSolrCloudCluster.assertTrue
+      TestSegmentSorting.assertTrue
         (KEY_FIELD +"="+idAsInt+" of ("+solrDocument0+") is not in minTimestampDocKeys("+minTimestampDocKeys+")",
          minTimestampDocKeys.contains(idAsInt));
-      TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, solrDocument0.getFieldValue(ODD_FIELD));
+      TestSegmentSorting.assertEquals(ODD_FIELD, oddFieldValue, solrDocument0.getFieldValue(ODD_FIELD));
     }
     // check segmentTerminatedEarly flag
-    TestMiniSolrCloudCluster.assertNotNull("responseHeader.segmentTerminatedEarly missing in "+rsp.getResponseHeader(),
+    TestSegmentSorting.assertNotNull("responseHeader.segmentTerminatedEarly missing in "+rsp.getResponseHeader(),
         rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY));
     // segmentTerminateEarly cannot be used with incompatible sort orders
-    TestMiniSolrCloudCluster.assertTrue("responseHeader.segmentTerminatedEarly missing/true in "+rsp.getResponseHeader(),
+    TestSegmentSorting.assertTrue("responseHeader.segmentTerminatedEarly missing/true in "+rsp.getResponseHeader(),
         Boolean.FALSE.equals(rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY)));
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7328e592/solr/core/src/test/org/apache/solr/cloud/TestAuthenticationFramework.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestAuthenticationFramework.java b/solr/core/src/test/org/apache/solr/cloud/TestAuthenticationFramework.java
index 8159e49..4f0076c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestAuthenticationFramework.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestAuthenticationFramework.java
@@ -21,42 +21,21 @@ import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
 import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 
-import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
-import org.apache.http.HttpException;
-import org.apache.http.HttpRequest;
 import org.apache.http.HttpRequestInterceptor;
-import org.apache.http.protocol.HttpContext;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
-import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.embedded.JettyConfig;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.index.TieredMergePolicyFactory;
 import org.apache.solr.security.AuthenticationPlugin;
 import org.apache.solr.security.HttpClientBuilderPlugin;
-import org.apache.solr.util.RevertDefaultThreadHandlerRule;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.ClassRule;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.RuleChain;
-import org.junit.rules.TestRule;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -64,30 +43,23 @@ import org.slf4j.LoggerFactory;
  * Test of the MiniSolrCloudCluster functionality with authentication enabled.
  */
 @LuceneTestCase.Slow
-@SuppressSysoutChecks(bugUrl = "Solr logs to JUL")
-public class TestAuthenticationFramework extends LuceneTestCase {
+public class TestAuthenticationFramework extends SolrCloudTestCase {
   
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private int NUM_SERVERS = 5;
-  private int NUM_SHARDS = 2;
-  private int REPLICATION_FACTOR = 2;
-  
+  private static final int numShards = 2;
+  private static final int numReplicas = 2;
+  private static final int maxShardsPerNode = 2;
+  private static final int nodeCount = (numShards*numReplicas + (maxShardsPerNode-1))/maxShardsPerNode;
+  private static final String configName = "solrCloudCollectionConfig";
+  private static final String collectionName = "testcollection";
+
   static String requestUsername = MockAuthenticationPlugin.expectedUsername;
   static String requestPassword = MockAuthenticationPlugin.expectedPassword;
 
-  @Rule
-  public TestRule solrTestRules = RuleChain
-      .outerRule(new SystemPropertiesRestoreRule());
-
-  @ClassRule
-  public static TestRule solrClassRules = RuleChain.outerRule(
-      new SystemPropertiesRestoreRule()).around(
-      new RevertDefaultThreadHandlerRule());
-
-  @Before
+  @Override
   public void setUp() throws Exception {
-    SolrTestCaseJ4.randomizeNumericTypesProperties(); // SOLR-10916
     setupAuthenticationPlugin();
+    configureCluster(nodeCount).addConfig(configName, configset("cloud-minimal")).configure();
     super.setUp();
   }
   
@@ -99,120 +71,67 @@ public class TestAuthenticationFramework extends LuceneTestCase {
   
   @Test
   public void testBasics() throws Exception {
+    collectionCreateSearchDeleteTwice();
 
-    MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
-    try {
-      // Should pass
-      collectionCreateSearchDelete(miniCluster);
-
-      MockAuthenticationPlugin.expectedUsername = "solr";
-      MockAuthenticationPlugin.expectedPassword = "s0lrRocks";
+    MockAuthenticationPlugin.expectedUsername = "solr";
+    MockAuthenticationPlugin.expectedPassword = "s0lrRocks";
 
-      // Should fail with 401
-      try {
-        collectionCreateSearchDelete(miniCluster);
+    // Should fail with 401
+    try {
+      collectionCreateSearchDeleteTwice();
+      fail("Should've returned a 401 error");
+    } catch (Exception ex) {
+      if (!ex.getMessage().contains("Error 401")) {
         fail("Should've returned a 401 error");
-      } catch (Exception ex) {
-        if (!ex.getMessage().contains("Error 401")) {
-          fail("Should've returned a 401 error");
-        }
-      } finally {
-        MockAuthenticationPlugin.expectedUsername = null;
-        MockAuthenticationPlugin.expectedPassword = null;
       }
     } finally {
-      miniCluster.shutdown();
+      MockAuthenticationPlugin.expectedUsername = null;
+      MockAuthenticationPlugin.expectedPassword = null;
     }
   }
 
-  @After
+  @Override
   public void tearDown() throws Exception {
-    SolrTestCaseJ4.clearNumericTypesProperties(); // SOLR-10916
     System.clearProperty("authenticationPlugin");
     super.tearDown();
   }
 
-  private MiniSolrCloudCluster createMiniSolrCloudCluster() throws Exception {
-    JettyConfig.Builder jettyConfig = JettyConfig.builder();
-    jettyConfig.waitForLoadingCoresToFinish(null);
-    return new MiniSolrCloudCluster(NUM_SERVERS, createTempDir(), jettyConfig.build());
-  }
-
-  private void createCollection(MiniSolrCloudCluster miniCluster, String collectionName, String asyncId)
+  private void createCollection(String collectionName)
       throws Exception {
-    String configName = "solrCloudCollectionConfig";
-    miniCluster.uploadConfigSet(SolrTestCaseJ4.TEST_PATH().resolve("collection1/conf"), configName);
-
-    final boolean persistIndex = random().nextBoolean();
-    Map<String, String>  collectionProperties = new HashMap<>();
-
-    collectionProperties.putIfAbsent(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
-    collectionProperties.putIfAbsent("solr.tests.maxBufferedDocs", "100000");
-    collectionProperties.putIfAbsent("solr.tests.ramBufferSizeMB", "100");
-    // use non-test classes so RandomizedRunner isn't necessary
-    collectionProperties.putIfAbsent(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_MERGEPOLICYFACTORY, TieredMergePolicyFactory.class.getName());
-    collectionProperties.putIfAbsent("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
-    collectionProperties.putIfAbsent("solr.directoryFactory", (persistIndex ? "solr.StandardDirectoryFactory" : "solr.RAMDirectoryFactory"));
-
-    if (asyncId == null) {
-      CollectionAdminRequest.createCollection(collectionName, configName, NUM_SHARDS, REPLICATION_FACTOR)
-          .setProperties(collectionProperties)
-          .process(miniCluster.getSolrClient());
+    if (random().nextBoolean()) {  // process asynchronously
+      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
+          .setMaxShardsPerNode(maxShardsPerNode)
+          .processAndWait(cluster.getSolrClient(), 90);
     }
     else {
-      CollectionAdminRequest.createCollection(collectionName, configName, NUM_SHARDS, REPLICATION_FACTOR)
-          .setProperties(collectionProperties)
-          .processAndWait(miniCluster.getSolrClient(), 30);
+      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
+          .setMaxShardsPerNode(maxShardsPerNode)
+          .process(cluster.getSolrClient());
     }
-
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish
+        (collectionName, cluster.getSolrClient().getZkStateReader(), true, true, 330);
   }
 
-  public void collectionCreateSearchDelete(MiniSolrCloudCluster miniCluster) throws Exception {
-
-    final String collectionName = "testcollection";
-
-    final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
+  public void collectionCreateSearchDeleteTwice() throws Exception {
+    final CloudSolrClient client = cluster.getSolrClient();
 
-    assertNotNull(miniCluster.getZkServer());
-    List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
-    assertEquals(NUM_SERVERS, jettys.size());
-    for (JettySolrRunner jetty : jettys) {
-      assertTrue(jetty.isRunning());
-    }
-
-    // create collection
-    log.info("#### Creating a collection");
-    final String asyncId = (random().nextBoolean() ? null : "asyncId("+collectionName+".create)="+random().nextInt());
-    createCollection(miniCluster, collectionName, asyncId);
-
-    ZkStateReader zkStateReader = miniCluster.getSolrClient().getZkStateReader();
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
+    for (int i = 0 ; i < 2 ; ++i) {
+      // create collection
+      createCollection(collectionName);
 
-    // modify/query collection
-    log.info("#### updating a querying collection");
-    cloudSolrClient.setDefaultCollection(collectionName);
-    SolrInputDocument doc = new SolrInputDocument();
-    doc.setField("id", "1");
-    cloudSolrClient.add(doc);
-    cloudSolrClient.commit();
-    SolrQuery query = new SolrQuery();
-    query.setQuery("*:*");
-    QueryResponse rsp = cloudSolrClient.query(query);
-    assertEquals(1, rsp.getResults().getNumFound());
+      // check that there's no left-over state
+      assertEquals(0, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
 
-    // delete the collection we created earlier
-    CollectionAdminRequest.deleteCollection(collectionName).process(miniCluster.getSolrClient());
+      // modify/query collection
+      new UpdateRequest().add("id", "1").commit(client, collectionName);
+      QueryResponse rsp = client.query(collectionName, new SolrQuery("*:*"));
+      assertEquals(1, rsp.getResults().getNumFound());
 
-    // create it again
-    String asyncId2 = (random().nextBoolean() ? null : "asyncId("+collectionName+".create)="+random().nextInt());
-    createCollection(miniCluster, collectionName, asyncId2);
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
-
-    // check that there's no left-over state
-    assertEquals(0, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
-    cloudSolrClient.add(doc);
-    cloudSolrClient.commit();
-    assertEquals(1, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
+      // delete the collection
+      CollectionAdminRequest.deleteCollection(collectionName).process(client);
+      AbstractDistribZkTestBase.waitForCollectionToDisappear
+          (collectionName, client.getZkStateReader(), true, true, 330);
+    }
   }
 
   public static class MockAuthenticationPlugin extends AuthenticationPlugin implements HttpClientBuilderPlugin {
@@ -245,12 +164,9 @@ public class TestAuthenticationFramework extends LuceneTestCase {
 
     @Override
     public SolrHttpClientBuilder getHttpClientBuilder(SolrHttpClientBuilder httpClientBuilder) {
-      interceptor = new HttpRequestInterceptor() {
-        @Override
-        public void process(HttpRequest req, HttpContext rsp) throws HttpException, IOException {
-          req.addHeader("username", requestUsername);
-          req.addHeader("password", requestPassword);
-        }
+      interceptor = (req, rsp) -> {
+        req.addHeader("username", requestUsername);
+        req.addHeader("password", requestPassword);
       };
 
       HttpClientUtil.addRequestInterceptor(interceptor);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7328e592/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java
new file mode 100644
index 0000000..c22610d
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java
@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.lang.invoke.MethodHandles;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test of the Collections API with the MiniSolrCloudCluster.
+ */
+@LuceneTestCase.Slow
+public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private static final int numShards = 2;
+  private static final int numReplicas = 2;
+  private static final int maxShardsPerNode = 1;
+  private static final int nodeCount = 5;
+  private static final String configName = "solrCloudCollectionConfig";
+  private static final Map<String,String> collectionProperties  // ensure indexes survive core shutdown
+      = Collections.singletonMap("solr.directoryFactory", "solr.StandardDirectoryFactory");
+
+  @Override
+  public void setUp() throws Exception {
+    configureCluster(nodeCount).addConfig(configName, configset("cloud-minimal")).configure();
+    super.setUp();
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    cluster.shutdown();
+    super.tearDown();
+  }
+
+  private void createCollection(String collectionName, String createNodeSet) throws Exception {
+    if (random().nextBoolean()) { // process asynchronously
+      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
+          .setMaxShardsPerNode(maxShardsPerNode)
+          .setCreateNodeSet(createNodeSet)
+          .setProperties(collectionProperties)
+          .processAndWait(cluster.getSolrClient(), 30);
+    }
+    else {
+      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
+          .setMaxShardsPerNode(maxShardsPerNode)
+          .setCreateNodeSet(createNodeSet)
+          .setProperties(collectionProperties)
+          .process(cluster.getSolrClient());
+    }
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish
+        (collectionName, cluster.getSolrClient().getZkStateReader(), true, true, 330);
+  }
+
+  @Test
+  public void testCollectionCreateSearchDelete() throws Exception {
+    final CloudSolrClient client = cluster.getSolrClient();
+    final String collectionName = "testcollection";
+
+    assertNotNull(cluster.getZkServer());
+    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
+    assertEquals(nodeCount, jettys.size());
+    for (JettySolrRunner jetty : jettys) {
+      assertTrue(jetty.isRunning());
+    }
+
+    // shut down a server
+    JettySolrRunner stoppedServer = cluster.stopJettySolrRunner(0);
+    assertTrue(stoppedServer.isStopped());
+    assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
+
+    // create a server
+    JettySolrRunner startedServer = cluster.startJettySolrRunner();
+    assertTrue(startedServer.isRunning());
+    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
+
+    // create collection
+    createCollection(collectionName, null);
+
+    // modify/query collection
+    new UpdateRequest().add("id", "1").commit(client, collectionName);
+    QueryResponse rsp = client.query(collectionName, new SolrQuery("*:*"));
+    assertEquals(1, rsp.getResults().getNumFound());
+
+    // remove a server not hosting any replicas
+    ZkStateReader zkStateReader = client.getZkStateReader();
+    zkStateReader.forceUpdateCollection(collectionName);
+    ClusterState clusterState = zkStateReader.getClusterState();
+    Map<String,JettySolrRunner> jettyMap = new HashMap<>();
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
+      String key = jetty.getBaseUrl().toString().substring((jetty.getBaseUrl().getProtocol() + "://").length());
+      jettyMap.put(key, jetty);
+    }
+    Collection<Slice> slices = clusterState.getCollection(collectionName).getSlices();
+    // track the servers not hosting any replicas
+    for (Slice slice : slices) {
+      jettyMap.remove(slice.getLeader().getNodeName().replace("_solr", "/solr"));
+      for (Replica replica : slice.getReplicas()) {
+        jettyMap.remove(replica.getNodeName().replace("_solr", "/solr"));
+      }
+    }
+    assertTrue("Expected to find a node without a replica", jettyMap.size() > 0);
+    JettySolrRunner jettyToStop = jettyMap.entrySet().iterator().next().getValue();
+    jettys = cluster.getJettySolrRunners();
+    for (int i = 0; i < jettys.size(); ++i) {
+      if (jettys.get(i).equals(jettyToStop)) {
+        cluster.stopJettySolrRunner(i);
+        assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
+      }
+    }
+
+    // re-create a server (to restore the original nodeCount)
+    startedServer = cluster.startJettySolrRunner(jettyToStop);
+    assertTrue(startedServer.isRunning());
+    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
+
+    CollectionAdminRequest.deleteCollection(collectionName).process(client);
+    AbstractDistribZkTestBase.waitForCollectionToDisappear
+        (collectionName, client.getZkStateReader(), true, true, 330);
+
+    // create it again
+    createCollection(collectionName, null);
+
+    // check that there's no left-over state
+    assertEquals(0, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+
+    // modify/query collection
+    new UpdateRequest().add("id", "1").commit(client, collectionName);
+    assertEquals(1, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+  }
+
+  @Test
+  public void testCollectionCreateWithoutCoresThenDelete() throws Exception {
+
+    final String collectionName = "testSolrCloudCollectionWithoutCores";
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    assertNotNull(cluster.getZkServer());
+    assertFalse(cluster.getJettySolrRunners().isEmpty());
+
+    // create collection
+    createCollection(collectionName, OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY);
+
+    // check the collection's corelessness
+    int coreCount = 0;
+    DocCollection docCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
+    for (Map.Entry<String,Slice> entry : docCollection.getSlicesMap().entrySet()) {
+      coreCount += entry.getValue().getReplicasMap().entrySet().size();
+    }
+    assertEquals(0, coreCount);
+
+    // delete the collection
+    CollectionAdminRequest.deleteCollection(collectionName).process(client);
+    AbstractDistribZkTestBase.waitForCollectionToDisappear
+        (collectionName, client.getZkStateReader(), true, true, 330);
+  }
+
+  @Test
+  public void testStopAllStartAll() throws Exception {
+
+    final String collectionName = "testStopAllStartAllCollection";
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    assertNotNull(cluster.getZkServer());
+    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
+    assertEquals(nodeCount, jettys.size());
+    for (JettySolrRunner jetty : jettys) {
+      assertTrue(jetty.isRunning());
+    }
+
+    final SolrQuery query = new SolrQuery("*:*");
+    final SolrInputDocument doc = new SolrInputDocument();
+
+    // create collection
+    createCollection(collectionName, null);
+
+    ZkStateReader zkStateReader = client.getZkStateReader();
+
+    // modify collection
+    final int numDocs = 1 + random().nextInt(10);
+    for (int ii = 1; ii <= numDocs; ++ii) {
+      doc.setField("id", ""+ii);
+      client.add(collectionName, doc);
+      if (ii*2 == numDocs) client.commit(collectionName);
+    }
+    client.commit(collectionName);
+
+    // query collection
+    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
+
+    // the test itself
+    zkStateReader.forceUpdateCollection(collectionName);
+    final ClusterState clusterState = zkStateReader.getClusterState();
+
+    final Set<Integer> leaderIndices = new HashSet<>();
+    final Set<Integer> followerIndices = new HashSet<>();
+    {
+      final Map<String,Boolean> shardLeaderMap = new HashMap<>();
+      for (final Slice slice : clusterState.getCollection(collectionName).getSlices()) {
+        for (final Replica replica : slice.getReplicas()) {
+          shardLeaderMap.put(replica.getNodeName().replace("_solr", "/solr"), Boolean.FALSE);
+        }
+        shardLeaderMap.put(slice.getLeader().getNodeName().replace("_solr", "/solr"), Boolean.TRUE);
+      }
+      for (int ii = 0; ii < jettys.size(); ++ii) {
+        final URL jettyBaseUrl = jettys.get(ii).getBaseUrl();
+        final String jettyBaseUrlString = jettyBaseUrl.toString().substring((jettyBaseUrl.getProtocol() + "://").length());
+        final Boolean isLeader = shardLeaderMap.get(jettyBaseUrlString);
+        if (Boolean.TRUE.equals(isLeader)) {
+          leaderIndices.add(ii);
+        } else if (Boolean.FALSE.equals(isLeader)) {
+          followerIndices.add(ii);
+        } // else neither leader nor follower i.e. node without a replica (for our collection)
+      }
+    }
+    final List<Integer> leaderIndicesList = new ArrayList<>(leaderIndices);
+    final List<Integer> followerIndicesList = new ArrayList<>(followerIndices);
+
+    // first stop the followers (in no particular order)
+    Collections.shuffle(followerIndicesList, random());
+    for (Integer ii : followerIndicesList) {
+      if (!leaderIndices.contains(ii)) {
+        cluster.stopJettySolrRunner(jettys.get(ii));
+      }
+    }
+
+    // then stop the leaders (again in no particular order)
+    Collections.shuffle(leaderIndicesList, random());
+    for (Integer ii : leaderIndicesList) {
+      cluster.stopJettySolrRunner(jettys.get(ii));
+    }
+
+    // calculate restart order
+    final List<Integer> restartIndicesList = new ArrayList<>();
+    Collections.shuffle(leaderIndicesList, random());
+    restartIndicesList.addAll(leaderIndicesList);
+    Collections.shuffle(followerIndicesList, random());
+    restartIndicesList.addAll(followerIndicesList);
+    if (random().nextBoolean()) Collections.shuffle(restartIndicesList, random());
+
+    // and then restart jettys in that order
+    for (Integer ii : restartIndicesList) {
+      final JettySolrRunner jetty = jettys.get(ii);
+      if (!jetty.isRunning()) {
+        cluster.startJettySolrRunner(jetty);
+        assertTrue(jetty.isRunning());
+      }
+    }
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
+
+    zkStateReader.forceUpdateCollection(collectionName);
+
+    // re-query collection
+    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7328e592/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java
deleted file mode 100644
index dd4d13c..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-
-import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.embedded.JettyConfig;
-import org.apache.solr.client.solrj.embedded.JettyConfig.Builder;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.index.TieredMergePolicyFactory;
-import org.apache.solr.util.RevertDefaultThreadHandlerRule;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.RuleChain;
-import org.junit.rules.TestRule;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Test of the MiniSolrCloudCluster functionality. Keep in mind, 
- * MiniSolrCloudCluster is designed to be used outside of the Lucene test
- * hierarchy.
- */
-@SuppressSysoutChecks(bugUrl = "Solr logs to JUL")
-public class TestMiniSolrCloudCluster extends LuceneTestCase {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  protected int NUM_SERVERS = 5;
-  protected int NUM_SHARDS = 2;
-  protected int REPLICATION_FACTOR = 2;
-
-  public TestMiniSolrCloudCluster () {
-    NUM_SERVERS = 5;
-    NUM_SHARDS = 2;
-    REPLICATION_FACTOR = 2;
-  }
-  
-  @BeforeClass
-  public static void setupHackNumerics() { // SOLR-10916
-    SolrTestCaseJ4.randomizeNumericTypesProperties();
-  }
-  @AfterClass
-  public static void clearHackNumerics() { // SOLR-10916
-    SolrTestCaseJ4.clearNumericTypesProperties();
-  }
-
-  @Rule
-  public TestRule solrTestRules = RuleChain
-      .outerRule(new SystemPropertiesRestoreRule());
-  
-  @ClassRule
-  public static TestRule solrClassRules = RuleChain.outerRule(
-      new SystemPropertiesRestoreRule()).around(
-      new RevertDefaultThreadHandlerRule());
-  
-  private MiniSolrCloudCluster createMiniSolrCloudCluster() throws Exception {
-    Builder jettyConfig = JettyConfig.builder();
-    jettyConfig.waitForLoadingCoresToFinish(null);
-    return new MiniSolrCloudCluster(NUM_SERVERS, createTempDir(), jettyConfig.build());
-  }
-    
-  private void createCollection(MiniSolrCloudCluster miniCluster, String collectionName, String createNodeSet, String asyncId,
-      Boolean indexToPersist, Map<String,String> collectionProperties) throws Exception {
-    String configName = "solrCloudCollectionConfig";
-    miniCluster.uploadConfigSet(SolrTestCaseJ4.TEST_PATH().resolve("collection1").resolve("conf"), configName);
-
-    final boolean persistIndex = (indexToPersist != null ? indexToPersist.booleanValue() : random().nextBoolean());
-    if (collectionProperties == null) {
-      collectionProperties = new HashMap<>();
-    }
-    collectionProperties.putIfAbsent(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
-    collectionProperties.putIfAbsent("solr.tests.maxBufferedDocs", "100000");
-    collectionProperties.putIfAbsent("solr.tests.ramBufferSizeMB", "100");
-    // use non-test classes so RandomizedRunner isn't necessary
-    collectionProperties.putIfAbsent(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_MERGEPOLICYFACTORY, TieredMergePolicyFactory.class.getName());
-    collectionProperties.putIfAbsent("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
-    collectionProperties.putIfAbsent("solr.directoryFactory", (persistIndex ? "solr.StandardDirectoryFactory" : "solr.RAMDirectoryFactory"));
-
-    if (asyncId == null) {
-      CollectionAdminRequest.createCollection(collectionName, configName, NUM_SHARDS, REPLICATION_FACTOR)
-          .setCreateNodeSet(createNodeSet)
-          .setProperties(collectionProperties)
-          .process(miniCluster.getSolrClient());
-    }
-    else {
-      CollectionAdminRequest.createCollection(collectionName, configName, NUM_SHARDS, REPLICATION_FACTOR)
-          .setCreateNodeSet(createNodeSet)
-          .setProperties(collectionProperties)
-          .processAndWait(miniCluster.getSolrClient(), 30);
-    }
-  }
-
-  @Test
-  public void testCollectionCreateSearchDelete() throws Exception {
-
-    final String collectionName = "testcollection";
-    MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
-
-    final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
-
-    try {
-      assertNotNull(miniCluster.getZkServer());
-      List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
-      assertEquals(NUM_SERVERS, jettys.size());
-      for (JettySolrRunner jetty : jettys) {
-        assertTrue(jetty.isRunning());
-      }
-
-      // shut down a server
-      log.info("#### Stopping a server");
-      JettySolrRunner stoppedServer = miniCluster.stopJettySolrRunner(0);
-      assertTrue(stoppedServer.isStopped());
-      assertEquals(NUM_SERVERS - 1, miniCluster.getJettySolrRunners().size());
-
-      // create a server
-      log.info("#### Starting a server");
-      JettySolrRunner startedServer = miniCluster.startJettySolrRunner();
-      assertTrue(startedServer.isRunning());
-      assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
-
-      // create collection
-      log.info("#### Creating a collection");
-      final String asyncId = (random().nextBoolean() ? null : "asyncId("+collectionName+".create)="+random().nextInt());
-      createCollection(miniCluster, collectionName, null, asyncId, null, null);
-
-      ZkStateReader zkStateReader = miniCluster.getSolrClient().getZkStateReader();
-      AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
-
-      // modify/query collection
-      log.info("#### updating a querying collection");
-      cloudSolrClient.setDefaultCollection(collectionName);
-      SolrInputDocument doc = new SolrInputDocument();
-      doc.setField("id", "1");
-      cloudSolrClient.add(doc);
-      cloudSolrClient.commit();
-      SolrQuery query = new SolrQuery();
-      query.setQuery("*:*");
-      QueryResponse rsp = cloudSolrClient.query(query);
-      assertEquals(1, rsp.getResults().getNumFound());
-
-      // remove a server not hosting any replicas
-      zkStateReader.forceUpdateCollection(collectionName);
-      ClusterState clusterState = zkStateReader.getClusterState();
-      HashMap<String, JettySolrRunner> jettyMap = new HashMap<String, JettySolrRunner>();
-      for (JettySolrRunner jetty : miniCluster.getJettySolrRunners()) {
-        String key = jetty.getBaseUrl().toString().substring((jetty.getBaseUrl().getProtocol() + "://").length());
-        jettyMap.put(key, jetty);
-      }
-      Collection<Slice> slices = clusterState.getSlices(collectionName);
-      // track the servers not host repliacs
-      for (Slice slice : slices) {
-        jettyMap.remove(slice.getLeader().getNodeName().replace("_solr", "/solr"));
-        for (Replica replica : slice.getReplicas()) {
-          jettyMap.remove(replica.getNodeName().replace("_solr", "/solr"));
-        }
-      }
-      assertTrue("Expected to find a node without a replica", jettyMap.size() > 0);
-      log.info("#### Stopping a server");
-      JettySolrRunner jettyToStop = jettyMap.entrySet().iterator().next().getValue();
-      jettys = miniCluster.getJettySolrRunners();
-      for (int i = 0; i < jettys.size(); ++i) {
-        if (jettys.get(i).equals(jettyToStop)) {
-          miniCluster.stopJettySolrRunner(i);
-          assertEquals(NUM_SERVERS - 1, miniCluster.getJettySolrRunners().size());
-        }
-      }
-
-      // re-create a server (to restore original NUM_SERVERS count)
-      log.info("#### Starting a server");
-      startedServer = miniCluster.startJettySolrRunner(jettyToStop);
-      assertTrue(startedServer.isRunning());
-      assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
-
-      CollectionAdminRequest.deleteCollection(collectionName).process(miniCluster.getSolrClient());
-
-      // create it again
-      String asyncId2 = (random().nextBoolean() ? null : "asyncId("+collectionName+".create)="+random().nextInt());
-      createCollection(miniCluster, collectionName, null, asyncId2, null, null);
-      AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
-
-      // check that there's no left-over state
-      assertEquals(0, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
-      cloudSolrClient.add(doc);
-      cloudSolrClient.commit();
-      assertEquals(1, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
-
-    }
-    finally {
-      miniCluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testCollectionCreateWithoutCoresThenDelete() throws Exception {
-
-    final String collectionName = "testSolrCloudCollectionWithoutCores";
-    final MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
-    final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
-
-    try {
-      assertNotNull(miniCluster.getZkServer());
-      assertFalse(miniCluster.getJettySolrRunners().isEmpty());
-
-      // create collection
-      final String asyncId = (random().nextBoolean() ? null : "asyncId("+collectionName+".create)="+random().nextInt());
-      createCollection(miniCluster, collectionName, OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY, asyncId, null, null);
-
-      try (SolrZkClient zkClient = new SolrZkClient
-          (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
-          ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
-        zkStateReader.createClusterStateWatchersAndUpdate();
-
-        // wait for collection to appear
-        AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
-
-        // check the collection's corelessness
-        {
-          int coreCount = 0; 
-          for (Map.Entry<String,Slice> entry : zkStateReader.getClusterState().getSlicesMap(collectionName).entrySet()) {
-            coreCount += entry.getValue().getReplicasMap().entrySet().size();
-          }
-          assertEquals(0, coreCount);
-        }
-
-      }
-    }
-    finally {
-      miniCluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testStopAllStartAll() throws Exception {
-
-    final String collectionName = "testStopAllStartAllCollection";
-
-    final MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
-
-    try {
-      assertNotNull(miniCluster.getZkServer());
-      List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
-      assertEquals(NUM_SERVERS, jettys.size());
-      for (JettySolrRunner jetty : jettys) {
-        assertTrue(jetty.isRunning());
-      }
-
-      createCollection(miniCluster, collectionName, null, null, Boolean.TRUE, null);
-      final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
-      cloudSolrClient.setDefaultCollection(collectionName);
-      final SolrQuery query = new SolrQuery("*:*");
-      final SolrInputDocument doc = new SolrInputDocument();
-
-      try (SolrZkClient zkClient = new SolrZkClient
-          (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
-          ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
-        zkStateReader.createClusterStateWatchersAndUpdate();
-        AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
-
-        // modify collection
-        final int numDocs = 1 + random().nextInt(10);
-        for (int ii = 1; ii <= numDocs; ++ii) {
-          doc.setField("id", ""+ii);
-          cloudSolrClient.add(doc);
-          if (ii*2 == numDocs) cloudSolrClient.commit();
-        }
-        cloudSolrClient.commit();
-        // query collection
-        {
-          final QueryResponse rsp = cloudSolrClient.query(query);
-          assertEquals(numDocs, rsp.getResults().getNumFound());
-        }
-
-        // the test itself
-        zkStateReader.forceUpdateCollection(collectionName);
-        final ClusterState clusterState = zkStateReader.getClusterState();
-
-        final HashSet<Integer> leaderIndices = new HashSet<Integer>();
-        final HashSet<Integer> followerIndices = new HashSet<Integer>();
-        {
-          final HashMap<String,Boolean> shardLeaderMap = new HashMap<String,Boolean>();
-          for (final Slice slice : clusterState.getSlices(collectionName)) {
-            for (final Replica replica : slice.getReplicas()) {
-              shardLeaderMap.put(replica.getNodeName().replace("_solr", "/solr"), Boolean.FALSE);
-            }
-            shardLeaderMap.put(slice.getLeader().getNodeName().replace("_solr", "/solr"), Boolean.TRUE);
-          }
-          for (int ii = 0; ii < jettys.size(); ++ii) {
-            final URL jettyBaseUrl = jettys.get(ii).getBaseUrl();
-            final String jettyBaseUrlString = jettyBaseUrl.toString().substring((jettyBaseUrl.getProtocol() + "://").length());
-            final Boolean isLeader = shardLeaderMap.get(jettyBaseUrlString);
-            if (Boolean.TRUE.equals(isLeader)) {
-              leaderIndices.add(new Integer(ii));
-            } else if (Boolean.FALSE.equals(isLeader)) {
-              followerIndices.add(new Integer(ii));
-            } // else neither leader nor follower i.e. node without a replica (for our collection)
-          }
-        }
-        final List<Integer> leaderIndicesList = new ArrayList<Integer>(leaderIndices);
-        final List<Integer> followerIndicesList = new ArrayList<Integer>(followerIndices);
-
-        // first stop the followers (in no particular order)
-        Collections.shuffle(followerIndicesList, random());
-        for (Integer ii : followerIndicesList) {
-          if (!leaderIndices.contains(ii)) {
-            miniCluster.stopJettySolrRunner(jettys.get(ii.intValue()));
-          }
-        }
-
-        // then stop the leaders (again in no particular order)
-        Collections.shuffle(leaderIndicesList, random());
-        for (Integer ii : leaderIndicesList) {
-          miniCluster.stopJettySolrRunner(jettys.get(ii.intValue()));
-        }
-
-        // calculate restart order
-        final List<Integer> restartIndicesList = new ArrayList<Integer>();
-        Collections.shuffle(leaderIndicesList, random());
-        restartIndicesList.addAll(leaderIndicesList);
-        Collections.shuffle(followerIndicesList, random());
-        restartIndicesList.addAll(followerIndicesList);
-        if (random().nextBoolean()) Collections.shuffle(restartIndicesList, random());
-
-        // and then restart jettys in that order
-        for (Integer ii : restartIndicesList) {
-          final JettySolrRunner jetty = jettys.get(ii.intValue());
-          if (!jetty.isRunning()) {
-            miniCluster.startJettySolrRunner(jetty);
-            assertTrue(jetty.isRunning());
-          }
-        }
-        AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
-
-        zkStateReader.forceUpdateCollection(collectionName);
-
-        // re-query collection
-        {
-          final QueryResponse rsp = cloudSolrClient.query(query);
-          assertEquals(numDocs, rsp.getResults().getNumFound());
-        }
-
-      }
-    }
-    finally {
-      miniCluster.shutdown();
-    }
-  }
-
-}
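For comparison, here is a minimal sketch of how the stop-all/start-all exercise above can be phrased once a test extends SolrCloudTestCase and uses the shared static cluster field, which is what this conversion is about. The class name, collection name, and node/shard counts below are illustrative assumptions rather than code from this commit; only the MiniSolrCloudCluster, CollectionAdminRequest, and waitForRecoveriesToFinish calls are taken from the code shown here.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    import org.apache.solr.client.solrj.embedded.JettySolrRunner;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.junit.BeforeClass;
    import org.junit.Test;

    // Sketch only; assumes the org.apache.solr.cloud test package.
    public class RestartAllSketch extends SolrCloudTestCase {

      @BeforeClass
      public static void setupCluster() throws Exception {
        configureCluster(3)
            .addConfig("conf", configset("cloud-minimal"))
            .configure();
      }

      @Test
      public void testStopAllStartAll() throws Exception {
        CollectionAdminRequest.createCollection("restartable", "conf", 2, 1)
            .process(cluster.getSolrClient());

        // stop every node in a shuffled order ...
        List<JettySolrRunner> jettys = new ArrayList<>(cluster.getJettySolrRunners());
        Collections.shuffle(jettys, random());
        for (JettySolrRunner jetty : jettys) {
          cluster.stopJettySolrRunner(jetty);
        }

        // ... then bring them all back, again in a shuffled order
        Collections.shuffle(jettys, random());
        for (JettySolrRunner jetty : jettys) {
          cluster.startJettySolrRunner(jetty);
          assertTrue(jetty.isRunning());
        }

        AbstractDistribZkTestBase.waitForRecoveriesToFinish(
            "restartable", cluster.getSolrClient().getZkStateReader(), true, true, 330);
      }
    }

The per-test MiniSolrCloudCluster construction and the finally { miniCluster.shutdown(); } bookkeeping seen above disappear in this style, because SolrCloudTestCase owns the cluster lifecycle.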

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7328e592/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterKerberos.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterKerberos.java b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterKerberos.java
deleted file mode 100644
index e2b0aea..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterKerberos.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.File;
-import java.nio.charset.StandardCharsets;
-
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
-import org.apache.solr.util.BadZookeeperThreadsFilter;
-import org.apache.solr.util.RevertDefaultThreadHandlerRule;
-import org.junit.ClassRule;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.RuleChain;
-import org.junit.rules.TestRule;
-
-/**
- * Test 5 nodes Solr cluster with Kerberos plugin enabled.
- * This test is Ignored right now as Mini KDC has a known bug that
- * doesn't allow us to run multiple nodes on the same host.
- * https://issues.apache.org/jira/browse/HADOOP-9893
- */
-@ThreadLeakFilters(defaultFilters = true, filters = {
-    BadZookeeperThreadsFilter.class // Zookeeper login leaks TGT renewal threads
-})
-
-@Ignore
-@LuceneTestCase.Slow
-@SuppressSysoutChecks(bugUrl = "Solr logs to JUL")
-public class TestMiniSolrCloudClusterKerberos extends TestMiniSolrCloudCluster {
-
-  public TestMiniSolrCloudClusterKerberos () {
-    NUM_SERVERS = 5;
-    NUM_SHARDS = 2;
-    REPLICATION_FACTOR = 2;
-  }
-  
-  private KerberosTestServices kerberosTestServices;
-
-  @Rule
-  public TestRule solrTestRules = RuleChain
-      .outerRule(new SystemPropertiesRestoreRule());
-
-  @ClassRule
-  public static TestRule solrClassRules = RuleChain.outerRule(
-      new SystemPropertiesRestoreRule()).around(
-      new RevertDefaultThreadHandlerRule());
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    setupMiniKdc();
-  }
-  
-  private void setupMiniKdc() throws Exception {
-    String kdcDir = createTempDir()+File.separator+"minikdc";
-    File keytabFile = new File(kdcDir, "keytabs");
-    String principal = "HTTP/127.0.0.1";
-    String zkServerPrincipal = "zookeeper/127.0.0.1";
-    KerberosTestServices kerberosTestServices = KerberosTestServices.builder()
-        .withKdc(new File(kdcDir))
-        .withJaasConfiguration(principal, keytabFile, zkServerPrincipal, keytabFile)
-        .build();
-
-    kerberosTestServices.start();
-    kerberosTestServices.getKdc().createPrincipal(keytabFile, principal, zkServerPrincipal);
-
-    String jaas = "Client {\n"
-        + " com.sun.security.auth.module.Krb5LoginModule required\n"
-        + " useKeyTab=true\n"
-        + " keyTab=\""+keytabFile.getAbsolutePath()+"\"\n"
-        + " storeKey=true\n"
-        + " useTicketCache=false\n"
-        + " doNotPrompt=true\n"
-        + " debug=true\n"
-        + " principal=\""+principal+"\";\n" 
-        + "};\n"
-        + "Server {\n"
-        + " com.sun.security.auth.module.Krb5LoginModule required\n"
-        + " useKeyTab=true\n"
-        + " keyTab=\""+keytabFile.getAbsolutePath()+"\"\n"
-        + " storeKey=true\n"
-        + " doNotPrompt=true\n"
-        + " useTicketCache=false\n"
-        + " debug=true\n"
-        + " principal=\""+zkServerPrincipal+"\";\n" 
-        + "};\n";
-
-    String jaasFilePath = kdcDir+File.separator + "jaas-client.conf";
-    FileUtils.write(new File(jaasFilePath), jaas, StandardCharsets.UTF_8);
-    System.setProperty("java.security.auth.login.config", jaasFilePath);
-    System.setProperty("solr.kerberos.cookie.domain", "127.0.0.1");
-    System.setProperty("solr.kerberos.principal", principal);
-    System.setProperty("solr.kerberos.keytab", keytabFile.getAbsolutePath());
-    System.setProperty("authenticationPlugin", "org.apache.solr.security.KerberosPlugin");
-
-    // more debugging, if needed
-    /*System.setProperty("sun.security.jgss.debug", "true");
-    System.setProperty("sun.security.krb5.debug", "true");
-    System.setProperty("sun.security.jgss.debug", "true");
-    System.setProperty("java.security.debug", "logincontext,policy,scl,gssloginconfig");*/
-  }
-  
-  @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/HADOOP-9893")
-  @Test
-  @Override
-  public void testCollectionCreateSearchDelete() throws Exception {
-    super.testCollectionCreateSearchDelete();
-  }
-  
-  @Override
-  public void tearDown() throws Exception {
-    System.clearProperty("java.security.auth.login.config");
-    System.clearProperty("cookie.domain");
-    System.clearProperty("kerberos.principal");
-    System.clearProperty("kerberos.keytab");
-    System.clearProperty("authenticationPlugin");
-    kerberosTestServices.stop();
-    super.tearDown();
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7328e592/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java
index 68af59f..08b1717 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java
@@ -19,38 +19,22 @@ package org.apache.solr.cloud;
 import java.io.File;
 import java.lang.invoke.MethodHandles;
 import java.nio.charset.StandardCharsets;
-import java.util.List;
-import java.util.Properties;
 
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 import org.apache.commons.io.FileUtils;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.embedded.JettyConfig;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.solr.index.TieredMergePolicyFactory;
 import org.apache.solr.util.BadZookeeperThreadsFilter;
-import org.apache.solr.util.RevertDefaultThreadHandlerRule;
 import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.RuleChain;
-import org.junit.rules.TestRule;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
-
 /**
  * Tests a Solr cluster with the Kerberos authentication plugin enabled.
  * MiniKdc has a known bug (HADOOP-9893) that
@@ -62,31 +46,19 @@ import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
 })
 
 @LuceneTestCase.Slow
-@LuceneTestCase.SuppressSysoutChecks(bugUrl = "Solr logs to JUL")
-public class TestSolrCloudWithKerberosAlt extends LuceneTestCase {
+public class TestSolrCloudWithKerberosAlt extends SolrCloudTestCase {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  protected final int NUM_SERVERS;
-  protected final int NUM_SHARDS;
-  protected final int REPLICATION_FACTOR;
-
-  public TestSolrCloudWithKerberosAlt () {
-    NUM_SERVERS = 1;
-    NUM_SHARDS = 1;
-    REPLICATION_FACTOR = 1;
-  }
 
+  private static final int numShards = 1;
+  private static final int numReplicas = 1;
+  private static final int maxShardsPerNode = 1;
+  private static final int nodeCount = (numShards*numReplicas + (maxShardsPerNode-1))/maxShardsPerNode;
+  private static final String configName = "solrCloudCollectionConfig";
+  private static final String collectionName = "testkerberoscollection";
+  
   private KerberosTestServices kerberosTestServices;
 
-  @Rule
-  public TestRule solrTestRules = RuleChain
-      .outerRule(new SystemPropertiesRestoreRule());
-
-  @ClassRule
-  public static TestRule solrClassRules = RuleChain.outerRule(
-      new SystemPropertiesRestoreRule()).around(
-      new RevertDefaultThreadHandlerRule());
-
   @BeforeClass
   public static void betterNotBeJava9() {
     assumeFalse("FIXME: SOLR-8182: This test fails under Java 9", Constants.JRE_IS_MINIMUM_JAVA9);
@@ -94,9 +66,9 @@ public class TestSolrCloudWithKerberosAlt extends LuceneTestCase {
 
   @Override
   public void setUp() throws Exception {
-    SolrTestCaseJ4.randomizeNumericTypesProperties(); // SOLR-10916
     super.setUp();
     setupMiniKdc();
+    configureCluster(nodeCount).addConfig(configName, configset("cloud-minimal")).configure();
   }
 
   private void setupMiniKdc() throws Exception {
@@ -141,10 +113,10 @@ public class TestSolrCloudWithKerberosAlt extends LuceneTestCase {
         );
 
     // more debugging, if needed
-    /*System.setProperty("sun.security.jgss.debug", "true");
-    System.setProperty("sun.security.krb5.debug", "true");
-    System.setProperty("sun.security.jgss.debug", "true");
-    System.setProperty("java.security.debug", "logincontext,policy,scl,gssloginconfig");*/
+    // System.setProperty("sun.security.jgss.debug", "true");
+    // System.setProperty("sun.security.krb5.debug", "true");
+    // System.setProperty("sun.security.jgss.debug", "true");
+    // System.setProperty("java.security.debug", "logincontext,policy,scl,gssloginconfig");
   }
   
   @Test
@@ -154,79 +126,47 @@ public class TestSolrCloudWithKerberosAlt extends LuceneTestCase {
     if (random().nextBoolean()) testCollectionCreateSearchDelete();
   }
 
-  protected void testCollectionCreateSearchDelete() throws Exception {
-    String collectionName = "testkerberoscollection";
+  private void testCollectionCreateSearchDelete() throws Exception {
+    CloudSolrClient client = cluster.getSolrClient();
+    CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
+        .setMaxShardsPerNode(maxShardsPerNode)
+        .process(client);
 
-    MiniSolrCloudCluster miniCluster
-        = new MiniSolrCloudCluster(NUM_SERVERS, createTempDir(), JettyConfig.builder().setContext("/solr").build());
-    CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
-    cloudSolrClient.setDefaultCollection(collectionName);
-    
-    try {
-      assertNotNull(miniCluster.getZkServer());
-      List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
-      assertEquals(NUM_SERVERS, jettys.size());
-      for (JettySolrRunner jetty : jettys) {
-        assertTrue(jetty.isRunning());
-      }
-
-      // create collection
-      String configName = "solrCloudCollectionConfig";
-      miniCluster.uploadConfigSet(SolrTestCaseJ4.TEST_PATH().resolve("collection1/conf"), configName);
-
-      CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, configName, NUM_SHARDS,REPLICATION_FACTOR);
-      Properties properties = new Properties();
-      properties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
-      properties.put("solr.tests.maxBufferedDocs", "100000");
-      properties.put("solr.tests.ramBufferSizeMB", "100");
-      // use non-test classes so RandomizedRunner isn't necessary
-      properties.put(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_MERGEPOLICYFACTORY, TieredMergePolicyFactory.class.getName());
-      properties.put("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
-      properties.put("solr.directoryFactory", "solr.RAMDirectoryFactory");
-      createRequest.setProperties(properties);
-      
-      createRequest.process(cloudSolrClient);
-      
-      try (SolrZkClient zkClient = new SolrZkClient
-          (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
-           ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
-        zkStateReader.createClusterStateWatchersAndUpdate();
-        AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
-
-        // modify/query collection
-        
-        SolrInputDocument doc = new SolrInputDocument();
-        doc.setField("id", "1");
-        cloudSolrClient.add(doc);
-        cloudSolrClient.commit();
-        SolrQuery query = new SolrQuery();
-        query.setQuery("*:*");
-        QueryResponse rsp = cloudSolrClient.query(query);
-        assertEquals(1, rsp.getResults().getNumFound());
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish
+        (collectionName, client.getZkStateReader(), true, true, 330);
+
+    // modify/query collection
+
+    new UpdateRequest().add("id", "1").commit(client, collectionName);
+    QueryResponse rsp = client.query(collectionName, new SolrQuery("*:*"));
+    assertEquals(1, rsp.getResults().getNumFound());
         
-        // delete the collection we created earlier
-        CollectionAdminRequest.deleteCollection(collectionName).process(cloudSolrClient);
+    // delete the collection we created earlier
+    CollectionAdminRequest.deleteCollection(collectionName).process(client);
         
-        AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, zkStateReader, true, true, 330);
-      }
-    }
-    finally {
-      cloudSolrClient.close();
-      miniCluster.shutdown();
-    }
+    AbstractDistribZkTestBase.waitForCollectionToDisappear
+        (collectionName, client.getZkStateReader(), true, true, 330);
   }
 
   @Override
   public void tearDown() throws Exception {
+    System.clearProperty("solr.jaas.debug");
     System.clearProperty("java.security.auth.login.config");
-    System.clearProperty("cookie.domain");
-    System.clearProperty("kerberos.principal");
-    System.clearProperty("kerberos.keytab");
+    System.clearProperty("solr.kerberos.jaas.appname");
+    System.clearProperty("solr.kerberos.cookie.domain");
+    System.clearProperty("solr.kerberos.principal");
+    System.clearProperty("solr.kerberos.keytab");
     System.clearProperty("authenticationPlugin");
+    System.clearProperty("solr.kerberos.delegation.token.enabled");
     System.clearProperty("solr.kerberos.name.rules");
-    System.clearProperty("solr.jaas.debug");
+    
+    // more debugging, if needed
+    // System.clearProperty("sun.security.jgss.debug");
+    // System.clearProperty("sun.security.krb5.debug");
+    // System.clearProperty("sun.security.jgss.debug");
+    // System.clearProperty("java.security.debug");
+
     kerberosTestServices.stop();
-    SolrTestCaseJ4.clearNumericTypesProperties(); // SOLR-10916
     super.tearDown();
   }
 }
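The tearDown() above clears a number of Kerberos-related system properties; for readers without the full file, here is a rough sketch of the setup side, assembled from the KerberosTestServices usage removed elsewhere in this commit. The directory layout, principal names, and the exact property set are assumptions; the committed setupMiniKdc() may differ.

    // Sketch of a setupMiniKdc()-style method that pairs with the tearDown() above.
    // Paths, principals, and the property list are illustrative assumptions.
    private void setupMiniKdcSketch() throws Exception {
      String kdcDir = createTempDir() + File.separator + "minikdc";
      File keytabFile = new File(kdcDir, "keytabs");
      String principal = "HTTP/127.0.0.1";
      String zkServerPrincipal = "zookeeper/127.0.0.1";

      kerberosTestServices = KerberosTestServices.builder()
          .withKdc(new File(kdcDir))
          .withJaasConfiguration(principal, keytabFile, zkServerPrincipal, keytabFile)
          .build();
      kerberosTestServices.start();
      kerberosTestServices.getKdc().createPrincipal(keytabFile, principal, zkServerPrincipal);

      // Mirror (part of) the properties cleared in tearDown()
      System.setProperty("solr.kerberos.principal", principal);
      System.setProperty("solr.kerberos.keytab", keytabFile.getAbsolutePath());
      System.setProperty("solr.kerberos.cookie.domain", "127.0.0.1");
      System.setProperty("authenticationPlugin", "org.apache.solr.security.KerberosPlugin");
    }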

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7328e592/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
index cf54499..607d0eb 100644
--- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
+++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
@@ -2685,16 +2685,14 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
   /**
   * Sets various sys props related to user-specified or randomized choices regarding the types
   * of numerics that should be used in tests.
-   * <p>
-   * TODO: This method can be private once SOLR-10916 is resolved
-   * </p>
+   *
    * @see #NUMERIC_DOCVALUES_SYSPROP
    * @see #NUMERIC_POINTS_SYSPROP
    * @see #clearNumericTypesProperties
    * @lucene.experimental
    * @lucene.internal
    */
-  public static void randomizeNumericTypesProperties() {
+  private static void randomizeNumericTypesProperties() {
 
     final boolean useDV = random().nextBoolean();
     System.setProperty(NUMERIC_DOCVALUES_SYSPROP, ""+useDV);
@@ -2738,14 +2736,12 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
   
   /**
   * Cleans up the randomized sys props and variables set by {@link #randomizeNumericTypesProperties}.
-   * <p>
-   * TODO: This method can be private once SOLR-10916 is resolved
-   * </p>
+   *
    * @see #randomizeNumericTypesProperties
    * @lucene.experimental
    * @lucene.internal
    */
-  public static void clearNumericTypesProperties() {
+  private static void clearNumericTypesProperties() {
     org.apache.solr.schema.PointField.TEST_HACK_IGNORE_USELESS_TRIEFIELD_ARGS = false;
     System.clearProperty("solr.tests.numeric.points");
     System.clearProperty("solr.tests.numeric.points.dv");