Posted to commits@lucene.apache.org by dw...@apache.org on 2020/11/03 10:27:12 UTC

[lucene-solr] branch branch_8x updated: SOLR-14981: Remove search results clustering contrib from 8x (#2058)

This is an automated email from the ASF dual-hosted git repository.

dweiss pushed a commit to branch branch_8x
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git


The following commit(s) were added to refs/heads/branch_8x by this push:
     new e334ba1  SOLR-14981: Remove search results clustering contrib from 8x (#2058)
e334ba1 is described below

commit e334ba104a4a97fa59e5c5175ee97aa85adffe8d
Author: Dawid Weiss <da...@carrotsearch.com>
AuthorDate: Tue Nov 3 11:26:50 2020 +0100

    SOLR-14981: Remove search results clustering contrib from 8x (#2058)
---
 lucene/ivy-versions.properties                     |   7 -
 solr/CHANGES.txt                                   |   3 +
 solr/bin/solr.in.cmd                               |   1 -
 solr/bin/solr.in.sh                                |   1 -
 solr/contrib/clustering/README.txt                 |   4 -
 solr/contrib/clustering/build.xml                  |  28 -
 solr/contrib/clustering/ivy.xml                    |  41 --
 .../handler/clustering/ClusteringComponent.java    | 393 --------------
 .../solr/handler/clustering/ClusteringEngine.java  |  41 --
 .../solr/handler/clustering/ClusteringParams.java  |  35 --
 .../clustering/DocumentClusteringEngine.java       |  47 --
 .../handler/clustering/SearchClusteringEngine.java |  52 --
 .../clustering/carrot2/CarrotClusteringEngine.java | 565 ---------------------
 .../handler/clustering/carrot2/CarrotParams.java   |  73 ---
 .../carrot2/LuceneCarrot2StemmerFactory.java       | 246 ---------
 .../carrot2/LuceneCarrot2TokenizerFactory.java     | 167 ------
 .../clustering/carrot2/SolrResourceLocator.java    | 142 ------
 .../SolrStopwordsCarrot2LexicalDataFactory.java    | 140 -----
 .../handler/clustering/carrot2/package-info.java   |  25 -
 .../solr/handler/clustering/package-info.java      |  26 -
 solr/contrib/clustering/src/java/overview.html     |  21 -
 .../carrot2/mock-external-attrs-attributes.xml     |  10 -
 .../conf/clustering/carrot2/stoplabels.mt          |   1 -
 .../conf/clustering/carrot2/stopwords.mt           |   1 -
 .../conf/clustering/custom/stoplabels.mt           |   1 -
 .../conf/clustering/custom/stopwords.mt            |   1 -
 .../collection1/conf/mapping-ISOLatin1Accent.txt   | 246 ---------
 .../clustering/solr/collection1/conf/protwords.txt |  21 -
 .../clustering/solr/collection1/conf/schema.xml    | 350 -------------
 .../solr/collection1/conf/solrconfig.xml           | 440 ----------------
 .../clustering/solr/collection1/conf/spellings.txt |   2 -
 .../clustering/solr/collection1/conf/stopwords.txt |  59 ---
 .../clustering/solr/collection1/conf/synonyms.txt  |  31 --
 .../clustering/solr/collection1/core.properties    |   0
 .../src/test-files/clustering/solr/solr.xml        |  32 --
 .../clustering/AbstractClusteringTestCase.java     | 250 ---------
 .../clustering/ClusteringComponentTest.java        | 143 ------
 .../DistributedClusteringComponentTest.java        |  54 --
 .../clustering/MockDocumentClusteringEngine.java   |  42 --
 .../carrot2/CarrotClusteringEngineTest.java        | 542 --------------------
 .../carrot2/DuplicatingStemmerFactory.java         |  33 --
 .../carrot2/DuplicatingTokenizerFactory.java       |  51 --
 .../carrot2/EchoClusteringAlgorithm.java           |  76 ---
 .../carrot2/EchoStemsClusteringAlgorithm.java      |  74 ---
 .../carrot2/EchoTokensClusteringAlgorithm.java     |  68 ---
 .../LexicalResourcesCheckClusteringAlgorithm.java  |  79 ---
 .../carrot2/MockClusteringAlgorithm.java           | 103 ----
 solr/example/README.txt                            |   4 +-
 .../conf/clustering/carrot2/kmeans-attributes.xml  |  19 -
 .../conf/clustering/carrot2/lingo-attributes.xml   |  24 -
 .../db/conf/clustering/carrot2/stc-attributes.xml  |  19 -
 .../conf/clustering/carrot2/kmeans-attributes.xml  |  19 -
 .../conf/clustering/carrot2/lingo-attributes.xml   |  24 -
 .../conf/clustering/carrot2/stc-attributes.xml     |  19 -
 .../conf/clustering/carrot2/kmeans-attributes.xml  |  19 -
 .../conf/clustering/carrot2/lingo-attributes.xml   |  24 -
 .../conf/clustering/carrot2/stc-attributes.xml     |  19 -
 solr/example/files/conf/solrconfig.xml             |   5 -
 solr/licenses/attributes-binder-1.3.3.jar.sha1     |   1 -
 solr/licenses/attributes-binder-LICENSE-ASL.txt    | 202 --------
 solr/licenses/attributes-binder-NOTICE.txt         |   9 -
 solr/licenses/carrot2-guava-18.0.jar.sha1          |   1 -
 solr/licenses/carrot2-guava-LICENSE-ASL.txt        | 202 --------
 solr/licenses/carrot2-guava-NOTICE.txt             |   5 -
 solr/licenses/carrot2-mini-3.16.0.jar.sha1         |   1 -
 solr/licenses/carrot2-mini-LICENSE-BSD_LIKE.txt    |  36 --
 solr/licenses/carrot2-mini-NOTICE.txt              |  10 -
 solr/licenses/simple-xml-safe-2.7.1.jar.sha1       |   1 -
 solr/licenses/simple-xml-safe-LICENSE-ASL.txt      | 202 --------
 solr/licenses/simple-xml-safe-NOTICE.txt           |   2 -
 solr/server/README.txt                             |   4 +-
 .../conf/clustering/carrot2/README.txt             |  11 -
 .../conf/clustering/carrot2/kmeans-attributes.xml  |  19 -
 .../conf/clustering/carrot2/lingo-attributes.xml   |  24 -
 .../conf/clustering/carrot2/stc-attributes.xml     |  19 -
 .../conf/solrconfig.xml                            |  92 ----
 .../conf/velocity/README.txt                       |   7 +-
 .../conf/velocity/cluster.vm                       |  19 -
 .../conf/velocity/cluster_results.vm               |  31 --
 .../conf/velocity/facets.vm                        |   1 -
 .../src/overview-of-searching-in-solr.adoc         |   4 +-
 ...andlers-and-searchcomponents-in-solrconfig.adoc |   1 -
 solr/solr-ref-guide/src/result-clustering.adoc     | 346 -------------
 solr/solr-ref-guide/src/searching.adoc             |   2 -
 solr/solr-ref-guide/src/solr-upgrade-notes.adoc    |   8 +
 .../apache/solr/client/solrj/response/Cluster.java |   2 -
 .../test-files/solrj/sampleClusteringResponse.xml  | 112 ----
 .../solrj/response/TestClusteringResponse.java     |  72 ---
 .../solr/common/cloud/TestZkMaintenanceUtils.java  |   2 -
 .../apache/solr/common/util/TestJavaBinCodec.java  |   8 -
 .../src/java/org/apache/solr/SolrTestCaseJ4.java   |   1 -
 91 files changed, 18 insertions(+), 6402 deletions(-)

diff --git a/lucene/ivy-versions.properties b/lucene/ivy-versions.properties
index f1c6b6f..f488be9 100644
--- a/lucene/ivy-versions.properties
+++ b/lucene/ivy-versions.properties
@@ -6,8 +6,6 @@ com.carrotsearch.randomizedtesting.version = 2.7.2
 /com.carrotsearch.randomizedtesting/junit4-ant = ${com.carrotsearch.randomizedtesting.version}
 /com.carrotsearch.randomizedtesting/randomizedtesting-runner = ${com.carrotsearch.randomizedtesting.version}
 
-/com.carrotsearch.thirdparty/simple-xml-safe = 2.7.1
-
 /com.carrotsearch/hppc = 0.8.1
 
 /com.cybozu.labs/langdetect = 1.1-20120112
@@ -241,11 +239,6 @@ org.bouncycastle.version = 1.65
 
 /org.brotli/dec = 0.1.2
 
-/org.carrot2.attributes/attributes-binder = 1.3.3
-/org.carrot2.shaded/carrot2-guava = 18.0
-
-/org.carrot2/carrot2-mini = 3.16.0
-
 org.carrot2.morfologik.version = 2.1.5
 /org.carrot2/morfologik-fsa = ${org.carrot2.morfologik.version}
 /org.carrot2/morfologik-polish = ${org.carrot2.morfologik.version}
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 2767434..d3738262 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -41,6 +41,9 @@ Other Changes
 
 * SOLR-14954: Heavily edit reindexing.adoc (Samuel García Martínez and Erick Erickson)
 
+* SOLR-14981: Removed search results clustering contrib from 8x (due to Carrot2 incompatibility with Java 1.8).
+  (Dawid Weiss)
+
 ==================  8.7.0 ==================
 
 Consult the lucene/CHANGES.txt file for additional, low level, changes in this release.
diff --git a/solr/bin/solr.in.cmd b/solr/bin/solr.in.cmd
index c9aa9e0..8dcc1e3 100755
--- a/solr/bin/solr.in.cmd
+++ b/solr/bin/solr.in.cmd
@@ -80,7 +80,6 @@ REM start command line as-is, in ADDITION to other options. If you specify the
 REM -a option on start script, those options will be appended as well. Examples:
 REM set SOLR_OPTS=%SOLR_OPTS% -Dsolr.autoSoftCommit.maxTime=3000
 REM set SOLR_OPTS=%SOLR_OPTS% -Dsolr.autoCommit.maxTime=60000
-REM set SOLR_OPTS=%SOLR_OPTS% -Dsolr.clustering.enabled=true
 
 REM Path to a directory for Solr to store cores and their data. By default, Solr will use server\solr
 REM If solr.xml is not stored in ZooKeeper, this directory needs to contain solr.xml
diff --git a/solr/bin/solr.in.sh b/solr/bin/solr.in.sh
index ddafdad..84c0956 100644
--- a/solr/bin/solr.in.sh
+++ b/solr/bin/solr.in.sh
@@ -96,7 +96,6 @@
 # -a option on start script, those options will be appended as well. Examples:
 #SOLR_OPTS="$SOLR_OPTS -Dsolr.autoSoftCommit.maxTime=3000"
 #SOLR_OPTS="$SOLR_OPTS -Dsolr.autoCommit.maxTime=60000"
-#SOLR_OPTS="$SOLR_OPTS -Dsolr.clustering.enabled=true"
 
 # Location where the bin/solr script will save PID files for running instances
 # If not set, the script will create PID files in $SOLR_TIP/bin
diff --git a/solr/contrib/clustering/README.txt b/solr/contrib/clustering/README.txt
deleted file mode 100644
index 5e9dcb5..0000000
--- a/solr/contrib/clustering/README.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-The Clustering contrib plugin for Solr provides a generic mechanism for plugging in third party clustering implementations.
-It currently provides clustering support for search results using the Carrot2 project.
-
-See https://lucene.apache.org/solr/guide/result-clustering for how to get started.
diff --git a/solr/contrib/clustering/build.xml b/solr/contrib/clustering/build.xml
deleted file mode 100644
index 7340a1f..0000000
--- a/solr/contrib/clustering/build.xml
+++ /dev/null
@@ -1,28 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
-
-<project name="solr-clustering" default="default">
-
-  <description>
-    Clustering Integration
-  </description>
-
-  <import file="../contrib-build.xml"/>
-
-</project>
diff --git a/solr/contrib/clustering/ivy.xml b/solr/contrib/clustering/ivy.xml
deleted file mode 100644
index 1de378c..0000000
--- a/solr/contrib/clustering/ivy.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one
-   or more contributor license agreements.  See the NOTICE file
-   distributed with this work for additional information
-   regarding copyright ownership.  The ASF licenses this file
-   to you under the Apache License, Version 2.0 (the
-   "License"); you may not use this file except in compliance
-   with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing,
-   software distributed under the License is distributed on an
-   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-   KIND, either express or implied.  See the License for the
-   specific language governing permissions and limitations
-   under the License.    
--->
-<ivy-module version="2.0">
-  <info organisation="org.apache.solr" module="clustering"/>
-  <configurations defaultconfmapping="compile->master;test->master">
-    <conf name="compile" transitive="false"/>
-    <conf name="test" transitive="false"/>
-  </configurations>
-  <dependencies>
-    <dependency org="org.carrot2" name="carrot2-mini" rev="${/org.carrot2/carrot2-mini}" conf="compile"/>
-    <dependency org="org.carrot2.shaded" name="carrot2-guava" rev="${/org.carrot2.shaded/carrot2-guava}" conf="compile"/>
-    <dependency org="org.carrot2.attributes" name="attributes-binder" rev="${/org.carrot2.attributes/attributes-binder}" conf="compile"/>
-
-    <dependency org="com.carrotsearch.thirdparty" name="simple-xml-safe" rev="${/com.carrotsearch.thirdparty/simple-xml-safe}" conf="compile"/>
-
-    <dependency org="com.fasterxml.jackson.core" name="jackson-annotations"  rev="${/com.fasterxml.jackson.core/jackson-annotations}"   conf="compile"/>
-    <dependency org="com.fasterxml.jackson.core" name="jackson-databind"     rev="${/com.fasterxml.jackson.core/jackson-databind}"      conf="compile"/>
-
-    <!--
-    NOTE: There are dependencies that are part of core Solr server (jackson-core, HPPC, etc.).
-    -->
-
-    <exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/>
-  </dependencies>
-</ivy-module>
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringComponent.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringComponent.java
deleted file mode 100644
index 1c2b471..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringComponent.java
+++ /dev/null
@@ -1,393 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexableField;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.handler.clustering.carrot2.CarrotClusteringEngine;
-import org.apache.solr.handler.component.ResponseBuilder;
-import org.apache.solr.handler.component.SearchComponent;
-import org.apache.solr.handler.component.ShardRequest;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.DocIterator;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.DocListAndSet;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Provides a plugin for performing cluster analysis. This can either be applied to 
- * search results (e.g., via <a href="http://project.carrot2.org">Carrot<sup>2</sup></a>) or for
- * clustering documents (e.g., via <a href="http://mahout.apache.org/">Mahout</a>).
- * <p>
- * See Solr example for configuration examples.</p>
- * 
- * @lucene.experimental
- */
-public class ClusteringComponent extends SearchComponent implements SolrCoreAware {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  /**
-   * Base name for all component parameters. This name is also used to
-   * register this component with SearchHandler.
-   */
-  public static final String COMPONENT_NAME = "clustering";
-
-  /**
-   * Declaration-order list of search clustering engines.
-   */
-  private final LinkedHashMap<String, SearchClusteringEngine> searchClusteringEngines = new LinkedHashMap<>();
-
-  /**
-   * Declaration order list of document clustering engines.
-   */
-  private final LinkedHashMap<String, DocumentClusteringEngine> documentClusteringEngines = new LinkedHashMap<>();
-
-  /**
-   * An unmodifiable view of {@link #searchClusteringEngines}.
-   */
-  private final Map<String, SearchClusteringEngine> searchClusteringEnginesView = Collections.unmodifiableMap(searchClusteringEngines);
-
-  /**
-   * Initialization parameters temporarily saved here, the component
-   * is initialized in {@link #inform(SolrCore)} because we need to know
-   * the core's {@link SolrResourceLoader}.
-   * 
-   * @see #init(NamedList)
-   */
-  private NamedList<Object> initParams;
-
-  /**
-   * Convert a DocList to a SolrDocumentList
-   *
-   * The optional param "ids" is populated with the lucene document id
-   * for each SolrDocument.
-   *
-   * @param docs The {@link org.apache.solr.search.DocList} to convert
-   * @param searcher The {@link org.apache.solr.search.SolrIndexSearcher} to use to load the docs from the Lucene index
-   * @param fields The names of the Fields to load
-   * @param ids A map to store the ids of the docs
-   * @return The new {@link SolrDocumentList} containing all the loaded docs
-   * @throws IOException if there was a problem loading the docs
-   * @since solr 1.4
-   */
-  public static SolrDocumentList docListToSolrDocumentList(
-      DocList docs,
-      SolrIndexSearcher searcher,
-      Set<String> fields,
-      Map<SolrDocument, Integer> ids ) throws IOException
-  {
-    IndexSchema schema = searcher.getSchema();
-
-    SolrDocumentList list = new SolrDocumentList();
-    list.setNumFound(docs.matches());
-    list.setMaxScore(docs.maxScore());
-    list.setStart(docs.offset());
-
-    DocIterator dit = docs.iterator();
-
-    while (dit.hasNext()) {
-      int docid = dit.nextDoc();
-
-      Document luceneDoc = searcher.doc(docid, fields);
-      SolrDocument doc = new SolrDocument();
-
-      for( IndexableField field : luceneDoc) {
-        if (null == fields || fields.contains(field.name())) {
-          SchemaField sf = schema.getField( field.name() );
-          doc.addField( field.name(), sf.getType().toObject( field ) );
-        }
-      }
-      if (docs.hasScores() && (null == fields || fields.contains("score"))) {
-        doc.addField("score", dit.score());
-      }
-
-      list.add( doc );
-
-      if( ids != null ) {
-        ids.put( doc, docid );
-      }
-    }
-    return list;
-  }
-
-  @Override
-  @SuppressWarnings({"rawtypes", "unchecked"})
-  public void init(NamedList args) {
-    this.initParams = args;
-    super.init(args);
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public void inform(SolrCore core) {
-    if (initParams != null) {
-      log.info("Initializing Clustering Engines");
-
-      // Our target list of engines, split into search-results and document clustering.
-      SolrResourceLoader loader = core.getResourceLoader();
-  
-      for (Map.Entry<String,Object> entry : initParams) {
-        if ("engine".equals(entry.getKey())) {
-          NamedList<Object> engineInitParams = (NamedList<Object>) entry.getValue();
-          Boolean optional = engineInitParams.getBooleanArg("optional");
-          optional = (optional == null ? Boolean.FALSE : optional);
-
-          String engineClassName = StringUtils.defaultIfBlank( 
-              (String) engineInitParams.get("classname"),
-              CarrotClusteringEngine.class.getName()); 
-  
-          // Instantiate the clustering engine and split to appropriate map. 
-          final ClusteringEngine engine = loader.newInstance(engineClassName, ClusteringEngine.class);
-          final String name = StringUtils.defaultIfBlank(engine.init(engineInitParams, core), "");
-
-          if (!engine.isAvailable()) {
-            if (optional) {
-              log.info("Optional clustering engine not available: {}", name);
-            } else {
-              throw new SolrException(ErrorCode.SERVER_ERROR, 
-                  "A required clustering engine failed to initialize, check the logs: " + name);
-            }
-          }
-          
-          final ClusteringEngine previousEntry;
-          if (engine instanceof SearchClusteringEngine) {
-            previousEntry = searchClusteringEngines.put(name, (SearchClusteringEngine) engine);
-          } else if (engine instanceof DocumentClusteringEngine) {
-            previousEntry = documentClusteringEngines.put(name, (DocumentClusteringEngine) engine);
-          } else {
-            log.warn("Unknown type of a clustering engine for class: {}", engineClassName);
-            continue;
-          }
-          if (previousEntry != null) {
-            log.warn("Duplicate clustering engine component named '{}'.", name);
-          }
-        }
-      }
-
-      // Set up the default engine key for both types of engines.
-      setupDefaultEngine("search results clustering", searchClusteringEngines);
-      setupDefaultEngine("document clustering", documentClusteringEngines);
-
-      log.info("Finished Initializing Clustering Engines");
-    }
-  }
-
-  @Override
-  public void prepare(ResponseBuilder rb) throws IOException {
-    SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false)) {
-      return;
-    }
-  }
-
-  @Override
-  public void process(ResponseBuilder rb) throws IOException {
-    SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false)) {
-      return;
-    }
-
-    final String name = getClusteringEngineName(rb);
-    boolean useResults = params.getBool(ClusteringParams.USE_SEARCH_RESULTS, false);
-    if (useResults == true) {
-      SearchClusteringEngine engine = searchClusteringEngines.get(name);
-      if (engine != null) {
-        checkAvailable(name, engine);
-        DocListAndSet results = rb.getResults();
-        Map<SolrDocument,Integer> docIds = new HashMap<>(results.docList.size());
-        SolrDocumentList solrDocList = docListToSolrDocumentList(
-            results.docList, rb.req.getSearcher(), engine.getFieldsToLoad(rb.req), docIds);
-        Object clusters = engine.cluster(rb.getQuery(), solrDocList, docIds, rb.req);
-        rb.rsp.add("clusters", clusters);
-      } else {
-        log.warn("No engine named: {}", name);
-      }
-    }
-
-    boolean useCollection = params.getBool(ClusteringParams.USE_COLLECTION, false);
-    if (useCollection == true) {
-      DocumentClusteringEngine engine = documentClusteringEngines.get(name);
-      if (engine != null) {
-        checkAvailable(name, engine);
-        boolean useDocSet = params.getBool(ClusteringParams.USE_DOC_SET, false);
-        NamedList<?> nl = null;
-
-        // TODO: This likely needs to be made into a background task that runs in an executor
-        if (useDocSet == true) {
-          nl = engine.cluster(rb.getResults().docSet, params);
-        } else {
-          nl = engine.cluster(params);
-        }
-        rb.rsp.add("clusters", nl);
-      } else {
-        log.warn("No engine named: {}", name);
-      }
-    }
-  }
-
-  private void checkAvailable(String name, ClusteringEngine engine) {
-    if (!engine.isAvailable()) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, 
-          "Clustering engine declared, but not available, check the logs: " + name);
-    }
-  }
-
-  private String getClusteringEngineName(ResponseBuilder rb){
-    return rb.req.getParams().get(ClusteringParams.ENGINE_NAME, ClusteringEngine.DEFAULT_ENGINE_NAME);
-  }
-
-  @Override
-  public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) {
-    SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false) || !params.getBool(ClusteringParams.USE_SEARCH_RESULTS, false)) {
-      return;
-    }
-    sreq.params.remove(COMPONENT_NAME);
-    if( ( sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS ) != 0 ){
-      String fl = sreq.params.get(CommonParams.FL,"*");
-      // if fl=* then we don't need to check.
-      if (fl.indexOf('*') >= 0) { 
-        return;
-      }
-
-      String name = getClusteringEngineName(rb);
-      SearchClusteringEngine engine = searchClusteringEngines.get(name);
-      if (engine != null) {
-        checkAvailable(name, engine);
-        Set<String> fields = engine.getFieldsToLoad(rb.req);
-        if (fields == null || fields.size() == 0) { 
-          return;
-        }
-  
-        StringBuilder sb = new StringBuilder();
-        String[] flparams = fl.split( "[,\\s]+" );
-        Set<String> flParamSet = new HashSet<>(flparams.length);
-        for (String flparam : flparams) {
-          // no need trim() because of split() by \s+
-          flParamSet.add(flparam);
-        }
-        for (String aFieldToLoad : fields) {
-          if (!flParamSet.contains(aFieldToLoad )) {
-            sb.append(',').append(aFieldToLoad);
-          }
-        }
-        if (sb.length() > 0) {
-          sreq.params.set(CommonParams.FL, fl + sb.toString());
-        }
-      } else {
-        log.warn("No engine named: {}", name);
-      }
-    }
-  }
-
-  @Override
-  public void finishStage(ResponseBuilder rb) {
-    SolrParams params = rb.req.getParams();
-    if (!params.getBool(COMPONENT_NAME, false) || 
-        !params.getBool(ClusteringParams.USE_SEARCH_RESULTS, false)) {
-      return;
-    }
-
-    if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS) {
-      String name = getClusteringEngineName(rb);
-      SearchClusteringEngine engine = searchClusteringEngines.get(name);
-      if (engine != null) {
-        checkAvailable(name, engine);
-        SolrDocumentList solrDocList = (SolrDocumentList) rb.rsp.getResponse();
-        // TODO: Currently, docIds is set to null in distributed environment.
-        // As a result, CarrotParams.PRODUCE_SUMMARY does not work.
-        // To make CarrotParams.PRODUCE_SUMMARY work in distributed mode, we can choose one of:
-        // (a) In each shard, ClusteringComponent produces summary and finishStage()
-        //     merges these summaries.
-        // (b) Adding a doHighlighting(SolrDocumentList, ...) method to SolrHighlighter and
-        //     making SolrHighlighter use "external text" rather than stored values to produce snippets.
-        Map<SolrDocument,Integer> docIds = null;
-        Object clusters = engine.cluster(rb.getQuery(), solrDocList, docIds, rb.req);
-        rb.rsp.add("clusters", clusters);
-      } else {
-        log.warn("No engine named: {}", name);
-      }
-    }
-  }
-
-  /**
-   * @return Expose for tests.
-   */
-  Map<String, SearchClusteringEngine> getSearchClusteringEngines() {
-    return searchClusteringEnginesView;
-  }
-
-  @Override
-  public String getDescription() {
-    return "A Clustering component";
-  }
-
-  /**
-   * Setup the default clustering engine.
-   * @see "https://issues.apache.org/jira/browse/SOLR-5219"
-   */
-  private static <T extends ClusteringEngine> void setupDefaultEngine(String type, LinkedHashMap<String,T> map) {
-    // If there's already a default algorithm, leave it as is.
-    String engineName = ClusteringEngine.DEFAULT_ENGINE_NAME;
-    T defaultEngine = map.get(engineName);
-
-    if (defaultEngine == null ||
-        !defaultEngine.isAvailable()) {
-      // If there's no default algorithm, and there are any algorithms available, 
-      // the first definition becomes the default algorithm.
-      for (Map.Entry<String, T> e : map.entrySet()) {
-        if (e.getValue().isAvailable()) {
-          engineName = e.getKey();
-          defaultEngine = e.getValue();
-          map.put(ClusteringEngine.DEFAULT_ENGINE_NAME, defaultEngine);
-          break;
-        }
-      }
-    }
-
-    if (defaultEngine != null) {
-      if (log.isInfoEnabled()) {
-        log.info("Default engine for {}: {} [{}]", type, engineName, defaultEngine.getClass().getSimpleName());
-      }
-    } else {
-      log.warn("No default engine for {}.", type);
-    }
-  }
-}
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringEngine.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringEngine.java
deleted file mode 100644
index 7b522ac..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringEngine.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrCore;
-
-/**
- * A base class for {@link SearchClusteringEngine} and {@link DocumentClusteringEngine}.
- * @lucene.experimental
- */
-public abstract class ClusteringEngine {
-  public static final String ENGINE_NAME = "name";
-  public static final String DEFAULT_ENGINE_NAME = "default";
-
-  private String name;
-
-  public String init(NamedList<?> config, SolrCore core) {
-    name = (String) config.get(ENGINE_NAME);
-    return name;
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  public abstract boolean isAvailable();
-}
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringParams.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringParams.java
deleted file mode 100644
index 66e5c54..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringParams.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering;
-/**
- * @lucene.experimental
- */
-public interface ClusteringParams {
-
-  public static final String CLUSTERING_PREFIX = "clustering.";
-
-  public static final String ENGINE_NAME = CLUSTERING_PREFIX + "engine";
-
-  public static final String USE_SEARCH_RESULTS = CLUSTERING_PREFIX + "results";
-
-  public static final String USE_COLLECTION = CLUSTERING_PREFIX + "collection";
-
-  /**
-   * When clustering full documents, cluster on the Doc Set.
-   */
-  public static final String USE_DOC_SET = CLUSTERING_PREFIX + "docs.useDocSet";
-}
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/DocumentClusteringEngine.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/DocumentClusteringEngine.java
deleted file mode 100644
index 8196034..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/DocumentClusteringEngine.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.search.DocSet;
-
-/**
- * @lucene.experimental
- */
-public abstract class DocumentClusteringEngine extends ClusteringEngine {
-
-  /**
-   * Experimental.  Subject to change before the next release
-   *
-   * Cluster all the documents in the index.  Clustering is often an expensive task that can take a long time.
-   * @param solrParams The params controlling clustering
-   * @return The clustering results
-   */
-  public abstract NamedList<?> cluster(SolrParams solrParams);
-
-  /**
-   * Experimental.  Subject to change before the next release
-   *
-   * Cluster the set of docs.  Clustering of documents is often an expensive task that can take a long time.
-   * @param docs The docs to cluster.  If null, cluster all docs as in {@link #cluster(org.apache.solr.common.params.SolrParams)}
-   * @param solrParams The params controlling the clustering
-   * @return The results.
-   */
-  public abstract NamedList<?> cluster(DocSet docs, SolrParams solrParams);
-
-
-}
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/SearchClusteringEngine.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/SearchClusteringEngine.java
deleted file mode 100644
index 8f0d0d7..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/SearchClusteringEngine.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering;
-
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.lucene.search.Query;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-
-/**
- * Base class for clustering engines performing cluster analysis on search
- * results.
- * 
- * @lucene.experimental
- */
-public abstract class SearchClusteringEngine extends ClusteringEngine {
-  /**
-   * Do the clustering, return a clusters structure to be appended to
-   * {@link SolrQueryResponse}.
-   */
-  public abstract Object cluster(Query query, SolrDocumentList solrDocumentList,
-      Map<SolrDocument,Integer> docIds, SolrQueryRequest sreq);
-
-  /**
-   * Returns the set of field names to load.
-   * Concrete classes can override this method if needed.
-   * Default implementation returns null, that is, all stored fields are loaded.
-   * 
-   * @return The set of field names to load.
-   */
-  protected Set<String> getFieldsToLoad(SolrQueryRequest sreq){
-    return null;
-  }
-}
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java
deleted file mode 100644
index e62cd0e..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java
+++ /dev/null
@@ -1,565 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Objects;
-import java.util.Set;
-import java.util.function.Supplier;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TotalHits;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.params.HighlightParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.SuppressForbidden;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.clustering.ClusteringEngine;
-import org.apache.solr.handler.clustering.SearchClusteringEngine;
-import org.apache.solr.handler.component.HighlightComponent;
-import org.apache.solr.highlight.SolrHighlighter;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.DocSlice;
-import org.apache.solr.search.SolrIndexSearcher;
-import org.carrot2.core.Cluster;
-import org.carrot2.core.Controller;
-import org.carrot2.core.ControllerFactory;
-import org.carrot2.core.Document;
-import org.carrot2.core.IClusteringAlgorithm;
-import org.carrot2.core.LanguageCode;
-import org.carrot2.core.attribute.AttributeNames;
-import org.carrot2.shaded.guava.common.base.MoreObjects;
-import org.carrot2.shaded.guava.common.base.Strings;
-import org.carrot2.text.linguistic.DefaultLexicalDataFactoryDescriptor;
-import org.carrot2.text.preprocessing.pipeline.BasicPreprocessingPipelineDescriptor;
-import org.carrot2.text.preprocessing.pipeline.BasicPreprocessingPipelineDescriptor.AttributeBuilder;
-import org.carrot2.util.attribute.AttributeValueSet;
-import org.carrot2.util.attribute.AttributeValueSets;
-import org.carrot2.util.resource.ClassLoaderLocator;
-import org.carrot2.util.resource.IResource;
-import org.carrot2.util.resource.ResourceLookup;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Search results clustering engine based on Carrot2 clustering algorithms.
- *
- * @see "http://project.carrot2.org"
- * @lucene.experimental
- */
-public class CarrotClusteringEngine extends SearchClusteringEngine {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  /**
-   * The subdirectory in Solr config dir to read customized Carrot2 resources from.
-   */
-  static final String CARROT_RESOURCES_PREFIX = "clustering/carrot2";
-
-  /**
-   * Name of Carrot2 document's field containing Solr document's identifier.
-   */
-  private static final String SOLR_DOCUMENT_ID = "solrId";
-
-  /**
-   * Name of Solr document's field containing the document's identifier. To avoid
-   * repeating the content of documents in clusters on output, each cluster contains
-   * identifiers of documents it contains.
-   */
-  private String idFieldName;
-
-  /**
-   * Carrot2 controller that manages instances of clustering algorithms
-   */
-  private Controller controller = ControllerFactory.createPooling();
-  
-  /**
-   * {@link IClusteringAlgorithm} class used for actual clustering.
-   */
-  private Class<? extends IClusteringAlgorithm> clusteringAlgorithmClass;
-
-  /** Solr core we're bound to. */
-  private SolrCore core;
-
-  @Override
-  public boolean isAvailable() {
-    return clusteringAlgorithmClass != null;
-  }
-  
-  @Override
-  @SuppressWarnings("rawtypes")
-  public String init(NamedList config, final SolrCore core) {
-    this.core = core;
-
-    String result = super.init(config, core);
-    final SolrParams initParams = config.toSolrParams();
-
-    // Initialization attributes for Carrot2 controller.
-    HashMap<String, Object> initAttributes = new HashMap<>();
-
-    // Customize Carrot2's resource lookup to first look for resources
-    // using Solr's resource loader. If that fails, try loading from the classpath.
-    ResourceLookup resourceLookup = new ResourceLookup(
-      // Solr-specific resource loading.
-      new SolrResourceLocator(core, initParams),
-      // Using the class loader directly because this time we want to omit the prefix
-      new ClassLoaderLocator(core.getResourceLoader().getClassLoader()));
-
-    DefaultLexicalDataFactoryDescriptor.attributeBuilder(initAttributes)
-      .resourceLookup(resourceLookup);
-
-    // Make sure the requested Carrot2 clustering algorithm class is available
-    String carrotAlgorithmClassName = initParams.get(CarrotParams.ALGORITHM);
-    try {
-      this.clusteringAlgorithmClass = core.getResourceLoader().findClass(
-          carrotAlgorithmClassName, IClusteringAlgorithm.class);
-    } catch (SolrException s) {
-      if (!(s.getCause() instanceof ClassNotFoundException)) {
-        throw s;
-      } 
-    }
-
-    // Load Carrot2-Workbench exported attribute XMLs based on the 'name' attribute
-    // of this component. This by-name convention lookup is used to simplify configuring algorithms.
-    String componentName = initParams.get(ClusteringEngine.ENGINE_NAME);
-    if (log.isInfoEnabled()) {
-      log.info("Initializing Clustering Engine '{}'", MoreObjects.firstNonNull(componentName, "<no 'name' attribute>"));
-    }
-
-    if (!Strings.isNullOrEmpty(componentName)) {
-      IResource[] attributeXmls = resourceLookup.getAll(componentName + "-attributes.xml");
-      if (attributeXmls.length > 0) {
-        if (attributeXmls.length > 1) {
-          log.warn("More than one attribute file found, first one will be used: {}"
-              , Arrays.toString(attributeXmls)); // nowarn
-        }
-
-        withContextClassLoader(core.getResourceLoader().getClassLoader(), () -> {
-          try {
-            AttributeValueSets avs = AttributeValueSets.deserialize(attributeXmls[0].open());
-            AttributeValueSet defaultSet = avs.getDefaultAttributeValueSet();
-            initAttributes.putAll(defaultSet.getAttributeValues());
-          } catch (Exception e) {
-            throw new SolrException(ErrorCode.SERVER_ERROR, 
-                "Could not read attributes XML for clustering component: " + componentName, e);
-          }
-          return null;
-        });
-      }
-    }
-
-    // Extract solrconfig attributes, they take precedence.
-    extractCarrotAttributes(initParams, initAttributes);
-
-    // Customize the stemmer and tokenizer factories. The implementations we provide here
-    // are included in the code base of Solr, so that it's possible to refactor
-    // the Lucene APIs the factories rely on if needed.
-    // Additionally, we set a custom lexical resource factory for Carrot2 that
-    // will use both Carrot2 default stop words as well as stop words from
-    // the StopFilter defined on the field.
-    final AttributeBuilder attributeBuilder = BasicPreprocessingPipelineDescriptor.attributeBuilder(initAttributes);
-    attributeBuilder.lexicalDataFactory(SolrStopwordsCarrot2LexicalDataFactory.class);
-    if (!initAttributes.containsKey(BasicPreprocessingPipelineDescriptor.Keys.TOKENIZER_FACTORY)) {
-      attributeBuilder.tokenizerFactory(LuceneCarrot2TokenizerFactory.class);
-    }
-    if (!initAttributes.containsKey(BasicPreprocessingPipelineDescriptor.Keys.STEMMER_FACTORY)) {
-      attributeBuilder.stemmerFactory(LuceneCarrot2StemmerFactory.class);
-    }
-
-    // Pass the schema (via the core) to SolrStopwordsCarrot2LexicalDataFactory.
-    initAttributes.put("solrCore", core);
-
-    // Carrot2 uses current thread's context class loader to get
-    // certain classes (e.g. custom tokenizer/stemmer) at initialization time.
-    // To make sure classes from contrib JARs are available,
-    // we swap the context class loader for the time of clustering.
-    withContextClassLoader(core.getResourceLoader().getClassLoader(), () -> this.controller.init(initAttributes));
-
-    SchemaField uniqueField = core.getLatestSchema().getUniqueKeyField();
-    if (uniqueField == null) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
-          CarrotClusteringEngine.class.getSimpleName() + " requires the schema to have a uniqueKeyField");
-    }
-    this.idFieldName = uniqueField.getName();
-
-    return result;
-  }
-
-  @Override
-  public Object cluster(Query query, SolrDocumentList solrDocList,
-      Map<SolrDocument, Integer> docIds, SolrQueryRequest sreq) {
-    try {
-      // Prepare attributes for Carrot2 clustering call
-      Map<String, Object> attributes = new HashMap<>();
-      List<Document> documents = getDocuments(solrDocList, docIds, query, sreq);
-      attributes.put(AttributeNames.DOCUMENTS, documents);
-      attributes.put(AttributeNames.QUERY, query.toString());
-  
-      // Pass the fields on which clustering runs.
-      attributes.put("solrFieldNames", getFieldsForClustering(sreq));
-  
-      // Pass extra overriding attributes from the request, if any
-      extractCarrotAttributes(sreq.getParams(), attributes);
-  
-      // Perform clustering and convert to an output structure of clusters.
-      //
-      // Carrot2 uses current thread's context class loader to get
-      // certain classes (e.g. custom tokenizer/stemmer) at runtime.
-      // To make sure classes from contrib JARs are available,
-      // we swap the context class loader for the time of clustering.
-      return withContextClassLoader(core.getResourceLoader().getClassLoader(),
-          () -> clustersToNamedList(controller.process(attributes,
-              clusteringAlgorithmClass).getClusters(), sreq.getParams()));
-    } catch (Exception e) {
-      log.error("Carrot2 clustering failed", e);
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Carrot2 clustering failed", e);
-    }
-  }
-
-  @Override
-  protected Set<String> getFieldsToLoad(SolrQueryRequest sreq){
-    SolrParams solrParams = sreq.getParams();
-
-    HashSet<String> fields = new HashSet<>(getFieldsForClustering(sreq));
-    fields.add(idFieldName);
-    fields.add(solrParams.get(CarrotParams.URL_FIELD_NAME, "url"));
-    fields.addAll(getCustomFieldsMap(solrParams).keySet());
-
-    String languageField = solrParams.get(CarrotParams.LANGUAGE_FIELD_NAME);
-    if (StringUtils.isNotBlank(languageField)) { 
-      fields.add(languageField);
-    }
-    return fields;
-  }
-
-  /**
-   * Returns the names of fields that will be delivering the actual
-   * content for clustering. Currently, there are two such fields: document
-   * title and document content.
-   */
-  private Set<String> getFieldsForClustering(SolrQueryRequest sreq) {
-    SolrParams solrParams = sreq.getParams();
-
-    String titleFieldSpec = solrParams.get(CarrotParams.TITLE_FIELD_NAME, "title");
-    String snippetFieldSpec = solrParams.get(CarrotParams.SNIPPET_FIELD_NAME, titleFieldSpec);
-    if (StringUtils.isBlank(snippetFieldSpec)) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, CarrotParams.SNIPPET_FIELD_NAME
-              + " must not be blank.");
-    }
-    
-    final Set<String> fields = new HashSet<>();
-    fields.addAll(Arrays.asList(titleFieldSpec.split("[, ]")));
-    fields.addAll(Arrays.asList(snippetFieldSpec.split("[, ]")));
-    return fields;
-  }
-
-  /**
-   * Prepares Carrot2 documents for clustering.
-   */
-  private List<Document> getDocuments(SolrDocumentList solrDocList, Map<SolrDocument, Integer> docIds,
-                                      Query query, final SolrQueryRequest sreq) throws IOException {
-    SolrHighlighter highlighter = null;
-    SolrParams solrParams = sreq.getParams();
-    SolrCore core = sreq.getCore();
-
-    String urlField = solrParams.get(CarrotParams.URL_FIELD_NAME, "url");
-    String titleFieldSpec = solrParams.get(CarrotParams.TITLE_FIELD_NAME, "title");
-    String snippetFieldSpec = solrParams.get(CarrotParams.SNIPPET_FIELD_NAME, titleFieldSpec);
-    String languageField = solrParams.get(CarrotParams.LANGUAGE_FIELD_NAME, null);
-    
-    // Maps Solr field names to Carrot2 custom field names
-    Map<String, String> customFields = getCustomFieldsMap(solrParams);
-
-    // Parse language code map string into a map
-    Map<String, String> languageCodeMap = new HashMap<>();
-    if (StringUtils.isNotBlank(languageField)) {
-      for (String pair : solrParams.get(CarrotParams.LANGUAGE_CODE_MAP, "").split("[, ]")) {
-        final String[] split = pair.split(":");
-        if (split.length == 2 && StringUtils.isNotBlank(split[0]) && StringUtils.isNotBlank(split[1])) {
-          languageCodeMap.put(split[0], split[1]);
-        } else {
-          log.warn("Unsupported format for {}: '{}'. Skipping this mapping."
-              , CarrotParams.LANGUAGE_CODE_MAP, pair);
-        }
-      }
-    }
-    
-    // Get the documents
-    boolean produceSummary = solrParams.getBool(CarrotParams.PRODUCE_SUMMARY, false);
-
-    SolrQueryRequest req = null;
-    String[] snippetFieldAry = null;
-    if (produceSummary) {
-      highlighter = HighlightComponent.getHighlighter(core);
-      if (highlighter != null){
-        Map<String, Object> args = new HashMap<>();
-        snippetFieldAry = snippetFieldSpec.split("[, ]");
-        args.put(HighlightParams.FIELDS, snippetFieldAry);
-        args.put(HighlightParams.HIGHLIGHT, "true");
-        args.put(HighlightParams.SIMPLE_PRE, ""); //we don't care about actually highlighting the area
-        args.put(HighlightParams.SIMPLE_POST, "");
-        args.put(HighlightParams.FRAGSIZE, solrParams.getInt(CarrotParams.SUMMARY_FRAGSIZE, solrParams.getInt(HighlightParams.FRAGSIZE, 100)));
-        args.put(HighlightParams.SNIPPETS, solrParams.getInt(CarrotParams.SUMMARY_SNIPPETS, solrParams.getInt(HighlightParams.SNIPPETS, 1)));
-        req = new LocalSolrQueryRequest(core, query.toString(), "", 0, 1, args) {
-          @Override
-          public SolrIndexSearcher getSearcher() {
-            return sreq.getSearcher();
-          }
-        };
-      } else {
-        log.warn("No highlighter configured, cannot produce summary");
-        produceSummary = false;
-      }
-    }
-
-    Iterator<SolrDocument> docsIter = solrDocList.iterator();
-    List<Document> result = new ArrayList<>(solrDocList.size());
-
-    float[] scores = {1.0f};
-    int[] docsHolder = new int[1];
-    Query theQuery = query;
-
-    while (docsIter.hasNext()) {
-      SolrDocument sdoc = docsIter.next();
-      String snippet = null;
-      
-      // TODO: docIds will be null when running distributed search.
-      // See comment in ClusteringComponent#finishStage().
-      if (produceSummary && docIds != null) {
-        docsHolder[0] = docIds.get(sdoc).intValue();
-        DocList docAsList = new DocSlice(0, 1, docsHolder, scores, 1, 1.0f, TotalHits.Relation.EQUAL_TO);
-        NamedList<Object> highlights = highlighter.doHighlighting(docAsList, theQuery, req, snippetFieldAry);
-        if (highlights != null && highlights.size() == 1) {
-          // should only be one value given our setup
-          // should only be one document
-          @SuppressWarnings("unchecked")
-          NamedList<String []> tmp = (NamedList<String[]>) highlights.getVal(0);
-          
-          final StringBuilder sb = new StringBuilder();
-          for (int j = 0; j < snippetFieldAry.length; j++) {
-            // Join fragments with a period, so that Carrot2 does not create
-            // cross-fragment phrases, such phrases rarely make sense.
-            String [] highlt = tmp.get(snippetFieldAry[j]);
-            if (highlt != null && highlt.length > 0) {
-              for (int i = 0; i < highlt.length; i++) {
-                sb.append(highlt[i]);
-                sb.append(" . ");
-              }
-            }
-          }
-          snippet = sb.toString();
-        }
-      }
-      
-      // If summaries not enabled or summary generation failed, use full content.
-      if (snippet == null) {
-        snippet = getConcatenated(sdoc, snippetFieldSpec);
-      }
-      
-      // Create a Carrot2 document
-      Document carrotDocument = new Document(getConcatenated(sdoc, titleFieldSpec),
-              snippet, Objects.toString(sdoc.getFieldValue(urlField), ""));
-      
-      // Store Solr id of the document, we need it to map document instances 
-      // found in clusters back to identifiers.
-      carrotDocument.setField(SOLR_DOCUMENT_ID, sdoc.getFieldValue(idFieldName));
-      
-      // Set language
-      if (StringUtils.isNotBlank(languageField)) {
-        Collection<Object> languages = sdoc.getFieldValues(languageField);
-        if (languages != null) {
-          
-          // Use the first Carrot2-supported language
-          for (Object l : languages) {
-            String lang = Objects.toString(l, "");
-            
-            if (languageCodeMap.containsKey(lang)) {
-              lang = languageCodeMap.get(lang);
-            }
-            
-            // Language detection Library for Java uses dashes to separate
-            // language variants, such as 'zh-cn', but Carrot2 uses underscores.
-            if (lang.indexOf('-') > 0) {
-              lang = lang.replace('-', '_');
-            }
-            
-            // If the language is supported by Carrot2, we'll get a non-null value
-            final LanguageCode carrot2Language = LanguageCode.forISOCode(lang);
-            if (carrot2Language != null) {
-              carrotDocument.setLanguage(carrot2Language);
-              break;
-            }
-          }
-        }
-      }
-      
-      // Add custom fields
-      if (customFields != null) {
-        for (Entry<String, String> entry : customFields.entrySet()) {
-          carrotDocument.setField(entry.getValue(), sdoc.getFieldValue(entry.getKey()));
-        }
-      }
-      
-      result.add(carrotDocument);
-    }
-
-    return result;
-  }
-
-  /**
-   * Expose clustering algorithm class for tests.
-   */
-  Class<? extends IClusteringAlgorithm> getClusteringAlgorithmClass() {
-    return clusteringAlgorithmClass;
-  }
-
-  /**
-   * Prepares a map of Solr field names (keys) to the corresponding Carrot2
-   * custom field names.
-   */
-  private Map<String, String> getCustomFieldsMap(SolrParams solrParams) {
-    Map<String, String> customFields = new HashMap<>();
-    String [] customFieldsSpec = solrParams.getParams(CarrotParams.CUSTOM_FIELD_NAME);
-    if (customFieldsSpec != null) {
-      customFields = new HashMap<>();
-      for (String customFieldSpec : customFieldsSpec) {
-        String [] split = customFieldSpec.split(":"); 
-        if (split.length == 2 && StringUtils.isNotBlank(split[0]) && StringUtils.isNotBlank(split[1])) {
-          customFields.put(split[0], split[1]);
-        } else {
-          log.warn("Unsupported format for {}: '{}'. Skipping this field definition."
-              , CarrotParams.CUSTOM_FIELD_NAME, customFieldSpec);
-        }
-      }
-    }
-    return customFields;
-  }
-
-  private String getConcatenated(SolrDocument sdoc, String fieldsSpec) {
-    StringBuilder result = new StringBuilder();
-    for (String field : fieldsSpec.split("[, ]")) {
-      Collection<Object> vals = sdoc.getFieldValues(field);
-      if (vals == null) continue;
-      Iterator<Object> ite = vals.iterator();
-      while(ite.hasNext()){
-        // Join multiple values with a period so that Carrot2 does not pick up
-        // phrases that cross field value boundaries (in most cases it would
-        // create useless phrases).
-        result.append(Objects.toString(ite.next(), "")).append(" . ");
-      }
-    }
-    return result.toString().trim();
-  }
-
-  private List<NamedList<Object>> clustersToNamedList(List<Cluster> carrotClusters,
-                                   SolrParams solrParams) {
-    List<NamedList<Object>> result = new ArrayList<>();
-    clustersToNamedList(carrotClusters, result, solrParams.getBool(
-            CarrotParams.OUTPUT_SUB_CLUSTERS, true), solrParams.getInt(
-            CarrotParams.NUM_DESCRIPTIONS, Integer.MAX_VALUE));
-    return result;
-  }
-
-  private void clustersToNamedList(List<Cluster> outputClusters,
-                                   List<NamedList<Object>> parent, boolean outputSubClusters, int maxLabels) {
-    for (Cluster outCluster : outputClusters) {
-      NamedList<Object> cluster = new SimpleOrderedMap<>();
-      parent.add(cluster);
-
-      // Add labels
-      List<String> labels = outCluster.getPhrases();
-      if (labels.size() > maxLabels) {
-        labels = labels.subList(0, maxLabels);
-      }
-      cluster.add("labels", labels);
-
-      // Add cluster score
-      final Double score = outCluster.getScore();
-      if (score != null) {
-        cluster.add("score", score);
-      }
-
-      // Add other topics marker
-      if (outCluster.isOtherTopics()) {
-        cluster.add("other-topics", outCluster.isOtherTopics());
-      }
-
-      // Add documents
-      List<Document> docs = outputSubClusters ? outCluster.getDocuments() : outCluster.getAllDocuments();
-      List<Object> docList = new ArrayList<>();
-      cluster.add("docs", docList);
-      for (Document doc : docs) {
-        docList.add(doc.getField(SOLR_DOCUMENT_ID));
-      }
-
-      // Add subclusters
-      if (outputSubClusters && !outCluster.getSubclusters().isEmpty()) {
-        List<NamedList<Object>> subclusters = new ArrayList<>();
-        cluster.add("clusters", subclusters);
-        clustersToNamedList(outCluster.getSubclusters(), subclusters,
-                outputSubClusters, maxLabels);
-      }
-    }
-  }
-
-  /**
-   * Extracts parameters that may match attributes of Carrot2 algorithms.
-   */
-  private void extractCarrotAttributes(SolrParams solrParams,
-                                       Map<String, Object> attributes) {
-    // Extract all non-predefined parameters. This way, we'll be able to set all
-    // parameters of Carrot2 algorithms without defining their names as constants.
-    for (Iterator<String> paramNames = solrParams.getParameterNamesIterator(); paramNames
-            .hasNext();) {
-      String paramName = paramNames.next();
-      if (!CarrotParams.CARROT_PARAM_NAMES.contains(paramName)) {
-        attributes.put(paramName, solrParams.get(paramName));
-      }
-    }
-  }
-  
-  @SuppressForbidden(reason = "Uses context class loader as a workaround to inject correct classloader to 3rd party libs")
-  private static <T> T withContextClassLoader(ClassLoader loader, Supplier<T> action) {
-    Thread ct = Thread.currentThread();
-    ClassLoader prev = ct.getContextClassLoader();
-    try {
-      ct.setContextClassLoader(loader);
-      return action.get();
-    } finally {
-      ct.setContextClassLoader(prev);
-    }
-  }
-
-}
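
    For reference, clustersToNamedList() above emits one entry per cluster containing
    "labels", an optional "score", an "other-topics" marker, the "docs" list of Solr
    document ids, and, when sub-clusters are requested, nested "clusters". Rendered as
    JSON, a response fragment would look roughly like the sketch below; the labels,
    score, and ids are hypothetical placeholders, not output captured from any test:

        "clusters": [
          {
            "labels": ["Data Mining"],
            "score": 62.5,
            "docs": ["doc-1", "doc-4"],
            "clusters": [
              { "labels": ["Text Clustering"], "docs": ["doc-4"] }
            ]
          },
          {
            "labels": ["Other Topics"],
            "other-topics": true,
            "docs": ["doc-2"]
          }
        ]
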
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotParams.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotParams.java
deleted file mode 100644
index d0fb0d5..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotParams.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Carrot2 parameter mapping (recognized and mapped if passed via Solr configuration).
- * @lucene.experimental
- */
-public final class CarrotParams {
-
-  private static String CARROT_PREFIX = "carrot.";
-
-  public static String ALGORITHM = CARROT_PREFIX + "algorithm";
-  
-  public static String TITLE_FIELD_NAME = CARROT_PREFIX + "title";
-  public static String URL_FIELD_NAME = CARROT_PREFIX + "url";
-  public static String SNIPPET_FIELD_NAME = CARROT_PREFIX + "snippet";
-  public static String LANGUAGE_FIELD_NAME = CARROT_PREFIX + "lang";
-  public static String CUSTOM_FIELD_NAME = CARROT_PREFIX + "custom";
-  
-  public static String PRODUCE_SUMMARY = CARROT_PREFIX + "produceSummary";
-  public static String SUMMARY_FRAGSIZE = CARROT_PREFIX + "fragSize";
-  public static String SUMMARY_SNIPPETS = CARROT_PREFIX + "summarySnippets";
-
-  public static String NUM_DESCRIPTIONS = CARROT_PREFIX + "numDescriptions";
-  public static String OUTPUT_SUB_CLUSTERS = CARROT_PREFIX + "outputSubClusters";
-
-  public static String LANGUAGE_CODE_MAP = CARROT_PREFIX + "lcmap";
-
-  /**
-   * Points to Carrot<sup>2</sup> resources
-   */
-  public static String RESOURCES_DIR = CARROT_PREFIX + "resourcesDir";
-
-  static final Set<String> CARROT_PARAM_NAMES = new HashSet<>(Arrays.asList(
-          ALGORITHM, 
-          
-          TITLE_FIELD_NAME, 
-          URL_FIELD_NAME, 
-          SNIPPET_FIELD_NAME, 
-          LANGUAGE_FIELD_NAME,
-          CUSTOM_FIELD_NAME,
-          
-          PRODUCE_SUMMARY, 
-          SUMMARY_FRAGSIZE, 
-          SUMMARY_SNIPPETS, 
-          
-          NUM_DESCRIPTIONS, 
-          OUTPUT_SUB_CLUSTERS, 
-          RESOURCES_DIR,
-          LANGUAGE_CODE_MAP));
-
-  /** No instances. */
-  private CarrotParams() {}
-}
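
    As a usage illustration, a request to a handler with the clustering component
    registered would typically pass a subset of these parameters alongside the regular
    query parameters; the clustering=true switch comes from ClusteringParams rather than
    this class. The host, collection, and field names below are hypothetical:

        http://localhost:8983/solr/collection1/select?q=*:*&rows=100
            &clustering=true
            &carrot.title=title&carrot.url=url&carrot.snippet=body
            &carrot.produceSummary=true&carrot.fragSize=100
            &carrot.numDescriptions=5&carrot.outputSubClusters=false
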
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/LuceneCarrot2StemmerFactory.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/LuceneCarrot2StemmerFactory.java
deleted file mode 100644
index 1ca89d6..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/LuceneCarrot2StemmerFactory.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-import java.lang.invoke.MethodHandles;
-
-import java.nio.CharBuffer;
-import java.util.HashMap;
-
-import org.apache.lucene.analysis.ar.ArabicNormalizer;
-import org.apache.lucene.analysis.ar.ArabicStemmer;
-import org.carrot2.core.LanguageCode;
-import org.carrot2.text.linguistic.IStemmer;
-import org.carrot2.text.linguistic.IStemmerFactory;
-import org.carrot2.util.ReflectionUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.tartarus.snowball.SnowballProgram;
-import org.tartarus.snowball.ext.DanishStemmer;
-import org.tartarus.snowball.ext.DutchStemmer;
-import org.tartarus.snowball.ext.EnglishStemmer;
-import org.tartarus.snowball.ext.FinnishStemmer;
-import org.tartarus.snowball.ext.FrenchStemmer;
-import org.tartarus.snowball.ext.GermanStemmer;
-import org.tartarus.snowball.ext.HungarianStemmer;
-import org.tartarus.snowball.ext.ItalianStemmer;
-import org.tartarus.snowball.ext.NorwegianStemmer;
-import org.tartarus.snowball.ext.PortugueseStemmer;
-import org.tartarus.snowball.ext.RomanianStemmer;
-import org.tartarus.snowball.ext.RussianStemmer;
-import org.tartarus.snowball.ext.SpanishStemmer;
-import org.tartarus.snowball.ext.SwedishStemmer;
-import org.tartarus.snowball.ext.TurkishStemmer;
-
-/**
- * An implementation of Carrot2's {@link IStemmerFactory} based on Lucene's
- * APIs. Should the relevant Lucene APIs change, the necessary adjustments can
- * be made in this class.
- * 
- * @lucene.experimental
- */
-public class LuceneCarrot2StemmerFactory implements IStemmerFactory {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @Override
-  public IStemmer getStemmer(LanguageCode language) {
-    switch (language) {
-    case ARABIC:
-      return ArabicStemmerFactory.createStemmer();
-
-    case CHINESE_SIMPLIFIED:
-      return IdentityStemmer.INSTANCE;
-
-    default:
-      /*
-       * For other languages, try to use snowball's stemming.
-       */
-      return SnowballStemmerFactory.createStemmer(language);
-    }
-  }
-
-  /**
-   * Factory of {@link IStemmer} implementations from the <code>snowball</code>
-   * project.
-   */
-  private final static class SnowballStemmerFactory {
-    /**
-     * Static hard mapping from language codes to stemmer classes in Snowball.
-     * This mapping is not dynamic because we want to keep the possibility to
-     * obfuscate these classes.
-     */
-    private static HashMap<LanguageCode, Class<? extends SnowballProgram>> snowballStemmerClasses;
-    static {
-      snowballStemmerClasses = new HashMap<>();
-      snowballStemmerClasses.put(LanguageCode.DANISH, DanishStemmer.class);
-      snowballStemmerClasses.put(LanguageCode.DUTCH, DutchStemmer.class);
-      snowballStemmerClasses.put(LanguageCode.ENGLISH, EnglishStemmer.class);
-      snowballStemmerClasses.put(LanguageCode.FINNISH, FinnishStemmer.class);
-      snowballStemmerClasses.put(LanguageCode.FRENCH, FrenchStemmer.class);
-      snowballStemmerClasses.put(LanguageCode.GERMAN, GermanStemmer.class);
-      snowballStemmerClasses
-          .put(LanguageCode.HUNGARIAN, HungarianStemmer.class);
-      snowballStemmerClasses.put(LanguageCode.ITALIAN, ItalianStemmer.class);
-      snowballStemmerClasses
-          .put(LanguageCode.NORWEGIAN, NorwegianStemmer.class);
-      snowballStemmerClasses.put(LanguageCode.PORTUGUESE,
-          PortugueseStemmer.class);
-      snowballStemmerClasses.put(LanguageCode.ROMANIAN, RomanianStemmer.class);
-      snowballStemmerClasses.put(LanguageCode.RUSSIAN, RussianStemmer.class);
-      snowballStemmerClasses.put(LanguageCode.SPANISH, SpanishStemmer.class);
-      snowballStemmerClasses.put(LanguageCode.SWEDISH, SwedishStemmer.class);
-      snowballStemmerClasses.put(LanguageCode.TURKISH, TurkishStemmer.class);
-    }
-
-    /**
-     * An adapter converting Snowball programs into {@link IStemmer} interface.
-     */
-    private static class SnowballStemmerAdapter implements IStemmer {
-      private final SnowballProgram snowballStemmer;
-
-      public SnowballStemmerAdapter(SnowballProgram snowballStemmer) {
-        this.snowballStemmer = snowballStemmer;
-      }
-
-      @Override
-      public CharSequence stem(CharSequence word) {
-        snowballStemmer.setCurrent(word.toString());
-        if (snowballStemmer.stem()) {
-          return snowballStemmer.getCurrent();
-        } else {
-          return null;
-        }
-      }
-    }
-
-    /**
-     * Create and return an {@link IStemmer} adapter for a
-     * {@link SnowballProgram} for a given language code. An identity stemmer is
-     * returned for unknown languages.
-     */
-    public static IStemmer createStemmer(LanguageCode language) {
-      final Class<? extends SnowballProgram> stemmerClazz = snowballStemmerClasses
-          .get(language);
-
-      if (stemmerClazz == null) {
-        log.warn("No Snowball stemmer class for: {}. "
-            + "Quality of clustering may be degraded.", language.name());
-        return IdentityStemmer.INSTANCE;
-      }
-
-      try {
-        return new SnowballStemmerAdapter(stemmerClazz.newInstance());
-      } catch (Exception e) {
-        log.warn("Could not instantiate snowball stemmer for language: {}"
-                + ". Quality of clustering may be degraded."
-            , language.name(), e);
-
-        return IdentityStemmer.INSTANCE;
-      }
-    }
-  }
-
-  /**
-   * Factory of {@link IStemmer} implementations for the
-   * {@link LanguageCode#ARABIC} language. Requires <code>lucene-contrib</code>
-   * to be present in classpath, otherwise an empty (identity) stemmer is
-   * returned.
-   */
-  private static class ArabicStemmerFactory {
-    static {
-      try {
-        ReflectionUtils.classForName(ArabicStemmer.class.getName(), false);
-        ReflectionUtils.classForName(ArabicNormalizer.class.getName(), false);
-      } catch (ClassNotFoundException e) {
-        log
-            .warn(
-                "Could not instantiate Lucene stemmer for Arabic, clustering quality "
-                    + "of Arabic content may be degraded. For best quality clusters, "
-                    + "make sure Lucene's Arabic analyzer JAR is in the classpath",
-                e);
-      }
-    }
-
-    /**
-     * Adapter to lucene-contrib Arabic analyzers.
-     */
-    private static class LuceneStemmerAdapter implements IStemmer {
-      private final org.apache.lucene.analysis.ar.ArabicStemmer delegate;
-      private final org.apache.lucene.analysis.ar.ArabicNormalizer normalizer;
-
-      private char[] buffer = new char[0];
-
-      private LuceneStemmerAdapter() {
-        delegate = new org.apache.lucene.analysis.ar.ArabicStemmer();
-        normalizer = new org.apache.lucene.analysis.ar.ArabicNormalizer();
-      }
-
-      @Override
-      public CharSequence stem(CharSequence word) {
-        if (word.length() > buffer.length) {
-          buffer = new char[word.length()];
-        }
-
-        for (int i = 0; i < word.length(); i++) {
-          buffer[i] = word.charAt(i);
-        }
-
-        int newLen = normalizer.normalize(buffer, word.length());
-        newLen = delegate.stem(buffer, newLen);
-
-        if (newLen != word.length() || !equals(buffer, newLen, word)) {
-          return CharBuffer.wrap(buffer, 0, newLen);
-        }
-
-        // Same-same.
-        return null;
-      }
-
-      private boolean equals(char[] buffer, int len, CharSequence word) {
-        assert len == word.length();
-
-        for (int i = 0; i < len; i++) {
-          if (buffer[i] != word.charAt(i))
-            return false;
-        }
-
-        return true;
-      }
-    }
-
-    public static IStemmer createStemmer() {
-      try {
-        return new LuceneStemmerAdapter();
-      } catch (Exception e) {
-        return IdentityStemmer.INSTANCE;
-      }
-    }
-  }
-
-  /**
-   * An implementation of {@link IStemmer} that always returns <code>null</code>
-   * which means no stemming.
-   */
-  private static class IdentityStemmer implements IStemmer {
-    private final static IdentityStemmer INSTANCE = new IdentityStemmer();
-
-    @Override
-    public CharSequence stem(CharSequence word) {
-      return null;
-    }
-  }
-}
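
    The SnowballStemmerAdapter above depends only on the setCurrent()/stem()/getCurrent()
    contract of SnowballProgram. A minimal, self-contained sketch of that contract,
    assuming the snowball classes bundled with Lucene's analyzers are on the classpath;
    the input word and the printed output are illustrative:

        import org.tartarus.snowball.ext.EnglishStemmer;

        public class SnowballContractDemo {
          public static void main(String[] args) {
            EnglishStemmer stemmer = new EnglishStemmer();
            // Set the word to stem, then run the stemming program.
            stemmer.setCurrent("clustering");
            if (stemmer.stem()) {
              // Prints the stemmed form, e.g. "cluster".
              System.out.println(stemmer.getCurrent());
            }
          }
        }
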
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/LuceneCarrot2TokenizerFactory.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/LuceneCarrot2TokenizerFactory.java
deleted file mode 100644
index b710c02..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/LuceneCarrot2TokenizerFactory.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-import java.io.IOException;
-import java.io.Reader;
-import java.lang.invoke.MethodHandles;
-import java.util.regex.Pattern;
-
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.carrot2.core.LanguageCode;
-import org.carrot2.text.analysis.ExtendedWhitespaceTokenizer;
-import org.carrot2.text.analysis.ITokenizer;
-import org.carrot2.text.linguistic.ITokenizerFactory;
-import org.carrot2.text.util.MutableCharArray;
-import org.carrot2.util.ExceptionUtils;
-import org.carrot2.util.ReflectionUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * An implementation of Carrot2's {@link ITokenizerFactory} based on Lucene's
- * Smart Chinese tokenizer. If the Smart Chinese tokenizer is not available on
- * the classpath at runtime, Carrot2's default tokenizer is used. Should the
- * Lucene APIs change, the necessary adjustments can be made in this class.
- * 
- * @lucene.experimental
- */
-public class LuceneCarrot2TokenizerFactory implements ITokenizerFactory {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @Override
-  public ITokenizer getTokenizer(LanguageCode language) {
-    switch (language) {
-    case CHINESE_SIMPLIFIED:
-      return ChineseTokenizerFactory.createTokenizer();
-
-      /*
-       * We use our own analyzer for Arabic. Lucene's version has special
-       * support for Nonspacing-Mark characters (see
-       * http://www.fileformat.info/info/unicode/category/Mn/index.htm), but we
-       * have them included as letters in the parser.
-       */
-    case ARABIC:
-      // Intentional fall-through.
-
-    default:
-      return new ExtendedWhitespaceTokenizer();
-    }
-  }
-
-  /**
-   * Creates tokenizers that adapt Lucene's Smart Chinese Tokenizer to Carrot2's
-   * {@link ITokenizer}. If Smart Chinese is not available on the classpath, the
-   * factory will fall back to the default whitespace tokenizer.
-   */
-  private static final class ChineseTokenizerFactory {
-    static {
-      try {
-        ReflectionUtils.classForName(
-            "org.apache.lucene.analysis.cn.smart.WordTokenFilter", false);
-        ReflectionUtils.classForName(
-            "org.apache.lucene.analysis.cn.smart.SentenceTokenizer", false);
-      } catch (Throwable e) {
-        log
-            .warn("Could not instantiate Smart Chinese Analyzer, clustering quality "
-                + "of Chinese content may be degraded. For best quality clusters, "
-                + "make sure Lucene's Smart Chinese Analyzer JAR is in the classpath");
-        if (e instanceof Error) {
-          throw (Error) e;
-        }
-      }
-    }
-
-    static ITokenizer createTokenizer() {
-      try {
-        return new ChineseTokenizer();
-      } catch (Throwable e) {
-        if (e instanceof OutOfMemoryError) {
-          throw (OutOfMemoryError) e;
-        }
-        return new ExtendedWhitespaceTokenizer();
-      }
-    }
-
-    private final static class ChineseTokenizer implements ITokenizer {
-      private final static Pattern numeric = Pattern
-          .compile("[\\-+'$]?\\d+([:\\-/,.]?\\d+)*[%$]?");
-
-      private Tokenizer sentenceTokenizer;
-      private TokenStream wordTokenFilter;
-      private CharTermAttribute term = null;
-
-      private final MutableCharArray tempCharSequence;
-      private final Class<?> tokenFilterClass;
-
-      private ChineseTokenizer() throws Exception {
-        this.tempCharSequence = new MutableCharArray(new char[0]);
-
-        // As Smart Chinese is not available during compile time,
-        // we need to resort to reflection.
-        final Class<?> tokenizerClass = ReflectionUtils.classForName(
-            "org.apache.lucene.analysis.cn.smart.SentenceTokenizer", false);
-        this.sentenceTokenizer = (Tokenizer) tokenizerClass.getConstructor(
-            Reader.class).newInstance((Reader) null);
-        this.tokenFilterClass = ReflectionUtils.classForName(
-            "org.apache.lucene.analysis.cn.smart.WordTokenFilter", false);
-      }
-
-      @Override
-      public short nextToken() throws IOException {
-        final boolean hasNextToken = wordTokenFilter.incrementToken();
-        if (hasNextToken) {
-          short flags = 0;
-          final char[] image = term.buffer();
-          final int length = term.length();
-          tempCharSequence.reset(image, 0, length);
-          if (length == 1 && image[0] == ',') {
-            // ChineseTokenizer seems to convert all punctuation to ','
-            // characters
-            flags = ITokenizer.TT_PUNCTUATION;
-          } else if (numeric.matcher(tempCharSequence).matches()) {
-            flags = ITokenizer.TT_NUMERIC;
-          } else {
-            flags = ITokenizer.TT_TERM;
-          }
-          return flags;
-        }
-
-        return ITokenizer.TT_EOF;
-      }
-
-      @Override
-      public void setTermBuffer(MutableCharArray array) {
-        array.reset(term.buffer(), 0, term.length());
-      }
-
-      @Override
-      public void reset(Reader input) {
-        try {
-          sentenceTokenizer.setReader(input);
-          wordTokenFilter = (TokenStream) tokenFilterClass.getConstructor(
-              TokenStream.class).newInstance(sentenceTokenizer);
-          term = wordTokenFilter.addAttribute(CharTermAttribute.class);
-        } catch (Exception e) {
-          throw ExceptionUtils.wrapAsRuntimeException(e);
-        }
-      }
-    }
-  }
-}
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/SolrResourceLocator.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/SolrResourceLocator.java
deleted file mode 100644
index 4c09799..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/SolrResourceLocator.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-import java.io.ByteArrayInputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrResourceLoader;
-import org.carrot2.util.resource.IResource;
-import org.carrot2.util.resource.IResourceLocator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A {@link IResourceLocator} that delegates resource searches to {@link SolrCore}.
- * 
- * @lucene.experimental
- */
-class SolrResourceLocator implements IResourceLocator {
-  private final SolrResourceLoader resourceLoader;
-  private final String carrot2ResourcesDir;
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public SolrResourceLocator(SolrCore core, SolrParams initParams) {
-    resourceLoader = core.getResourceLoader();
-    
-    String resourcesDir = initParams.get(CarrotParams.RESOURCES_DIR);
-    carrot2ResourcesDir = firstNonNull(resourcesDir, CarrotClusteringEngine.CARROT_RESOURCES_PREFIX);
-  }
-
-  @SuppressWarnings("unchecked")
-  public static <T> T firstNonNull(T... args) {
-    for (T t : args) {
-      if (t != null) return t;
-    }
-    throw new NullPointerException("At least one element has to be non-null.");
-  }
-
-  @Override
-  public IResource[] getAll(final String resource) {
-    final String resourceName = carrot2ResourcesDir + "/" + resource;
-    log.debug("Looking for Solr resource: {}", resourceName);
-
-    InputStream resourceStream = null;
-    final byte [] asBytes;
-    try {
-      resourceStream = resourceLoader.openResource(resourceName);
-      asBytes = IOUtils.toByteArray(resourceStream);
-    } catch (IOException e) {
-      log.debug("Resource not found in Solr's config: {}. Using the default {} from Carrot JAR."
-          , resourceName,  resource);
-      return new IResource[] {};
-    } finally {
-      if (resourceStream != null) {
-        try {
-          resourceStream.close();
-        } catch (IOException e) {
-          // ignore.
-        }
-      }
-    }
-
-    log.info("Loaded Solr resource: {}", resourceName);
-
-    final IResource foundResource = new IResource() {
-      @Override
-      public InputStream open() {
-        return new ByteArrayInputStream(asBytes);
-      }
-
-      @Override
-      public int hashCode() {
-        // In case multiple resources are found they will be deduped, but we don't use it in Solr,
-        // so simply rely on instance equivalence.
-        return super.hashCode();
-      }
-      
-      @Override
-      public boolean equals(Object obj) {
-        // In case multiple resources are found they will be deduped, but we don't use it in Solr,
-        // so simply rely on instance equivalence.
-        return super.equals(obj);
-      }
-
-      @Override
-      public String toString() {
-        return "Solr config resource: " + resourceName;
-      }
-    };
-
-    return new IResource[] { foundResource };
-  }
-
-  @Override
-  public int hashCode() {
-    // In case multiple locations are used locators will be deduped, but we don't use it in Solr,
-    // so simply rely on instance equivalence.
-    return super.hashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    // In case multiple locations are used locators will be deduped, but we don't use it in Solr,
-    // so simply rely on instance equivalence.
-    return super.equals(obj);
-  }
-
-  @Override
-  public String toString() {
-    String configDir = "";
-    try {
-      configDir = "configDir=" + new File(resourceLoader.getConfigDir()).getAbsolutePath() + ", ";
-    } catch (Exception ignored) {
-      // If we get the exception, the resource loader implementation
-      // probably does not support getConfigDir(). Not a big problem.
-    }
-    
-    return "SolrResourceLocator, " + configDir
-        + "Carrot2 relative lexicalResourcesDir=" + carrot2ResourcesDir;
-  }
-}
\ No newline at end of file
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/SolrStopwordsCarrot2LexicalDataFactory.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/SolrStopwordsCarrot2LexicalDataFactory.java
deleted file mode 100644
index 569b1bb..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/SolrStopwordsCarrot2LexicalDataFactory.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.CharArraySet;
-import org.apache.lucene.analysis.commongrams.CommonGramsFilterFactory;
-import org.apache.lucene.analysis.core.StopFilterFactory;
-import org.apache.lucene.analysis.util.TokenFilterFactory;
-import org.apache.solr.analysis.TokenizerChain;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.schema.IndexSchema;
-import org.carrot2.core.LanguageCode;
-import org.carrot2.core.attribute.Init;
-import org.carrot2.core.attribute.Processing;
-import org.carrot2.text.linguistic.DefaultLexicalDataFactory;
-import org.carrot2.text.linguistic.ILexicalData;
-import org.carrot2.text.linguistic.ILexicalDataFactory;
-import org.carrot2.text.util.MutableCharArray;
-import org.carrot2.util.attribute.Attribute;
-import org.carrot2.util.attribute.Bindable;
-import org.carrot2.util.attribute.Input;
-
-/**
- * An implementation of Carrot2's {@link ILexicalDataFactory} that adds stop
- * words from a field's StopFilter to the default stop words used in Carrot2,
- * for all languages Carrot2 supports. Completely replacing Carrot2 stop words
- * with Solr's wouldn't make much sense because clustering needs more aggressive
- * stop word removal. In other words, if something is a stop word during
- * indexing, then it should also be a stop word during clustering, but not the
- * other way round.
- * 
- * @lucene.experimental
- */
-@Bindable
-public class SolrStopwordsCarrot2LexicalDataFactory implements ILexicalDataFactory {
-
-  @Init
-  @Input
-  @Attribute(key = "solrCore")
-  public SolrCore core;
-
-  @Processing
-  @Input
-  @Attribute(key = "solrFieldNames")
-  public Set<String> fieldNames;
-
-  /**
-   * A lazily-built cache of stop words per field.
-   */
-  private HashMap<String, List<CharArraySet>> solrStopWords = new HashMap<>();
-
-  /**
-   * Carrot2's default lexical resources to use in addition to Solr's stop
-   * words.
-   */
-  public DefaultLexicalDataFactory carrot2LexicalDataFactory = new DefaultLexicalDataFactory();
-
-  /**
-   * Obtains stop words for a field from the associated
-   * {@link StopFilterFactory}, if any.
-   */
-  private List<CharArraySet> getSolrStopWordsForField(String fieldName) {
-    // Carrot2 ensures that instances of this class are not used by multiple
-    // threads at a time, but synchronize on the cache defensively anyway.
-    synchronized (solrStopWords) {
-      if (!solrStopWords.containsKey(fieldName)) {
-        solrStopWords.put(fieldName, new ArrayList<>());
-
-        IndexSchema schema = core.getLatestSchema();
-        final Analyzer fieldAnalyzer = schema.getFieldType(fieldName).getIndexAnalyzer();
-        if (fieldAnalyzer instanceof TokenizerChain) {
-          final TokenFilterFactory[] filterFactories = 
-              ((TokenizerChain) fieldAnalyzer).getTokenFilterFactories();
-          for (TokenFilterFactory factory : filterFactories) {
-            if (factory instanceof StopFilterFactory) {
-              // StopFilterFactory holds the stop words in a CharArraySet
-              CharArraySet stopWords = ((StopFilterFactory) factory).getStopWords();
-              solrStopWords.get(fieldName).add(stopWords);
-            }
-
-            if (factory instanceof CommonGramsFilterFactory) {
-              CharArraySet commonWords = ((CommonGramsFilterFactory) factory).getCommonWords();
-              solrStopWords.get(fieldName).add(commonWords);
-            }
-          }
-        }
-      }
-      return solrStopWords.get(fieldName);
-    }
-  }
-
-  @Override
-  public ILexicalData getLexicalData(LanguageCode languageCode) {
-    final ILexicalData carrot2LexicalData = carrot2LexicalDataFactory
-        .getLexicalData(languageCode);
-
-    return new ILexicalData() {
-      @Override
-      public boolean isStopLabel(CharSequence word) {
-        // Nothing in Solr maps to the concept of a stop label,
-        // so return Carrot2's default here.
-        return carrot2LexicalData.isStopLabel(word);
-      }
-
-      @Override
-      public boolean isCommonWord(MutableCharArray word) {
-        // Loop over the fields involved in clustering first
-        for (String fieldName : fieldNames) {
-          for (CharArraySet stopWords : getSolrStopWordsForField(fieldName)) {
-            if (stopWords.contains(word)) {
-              return true;
-            }
-          }
-        }
-        // Check default Carrot2 stop words too
-        return carrot2LexicalData.isCommonWord(word);
-      }
-    };
-  }
-}
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/package-info.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/package-info.java
deleted file mode 100644
index 6d0d49b..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/package-info.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
-/** 
- * {@link org.apache.solr.handler.clustering.carrot2.CarrotClusteringEngine} and related classes for use in the {@link org.apache.solr.handler.clustering.ClusteringComponent}.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-
-
-
diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/package-info.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/package-info.java
deleted file mode 100644
index 25cca33..0000000
--- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/package-info.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
-/** 
- * {@link org.apache.solr.handler.clustering.ClusteringComponent} and common APIs for specific implementations.
-
- */
-package org.apache.solr.handler.clustering;
-
-
-
-
diff --git a/solr/contrib/clustering/src/java/overview.html b/solr/contrib/clustering/src/java/overview.html
deleted file mode 100644
index 59940f6..0000000
--- a/solr/contrib/clustering/src/java/overview.html
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<html>
-<body>
-Apache Solr Search Server: text clustering contrib
-</body>
-</html>
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/carrot2/mock-external-attrs-attributes.xml b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/carrot2/mock-external-attrs-attributes.xml
deleted file mode 100644
index 63557c3..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/carrot2/mock-external-attrs-attributes.xml
+++ /dev/null
@@ -1,10 +0,0 @@
-<attribute-sets default="overridden-attributes">
-  <attribute-set id="overridden-attributes">
-    <value-set>
-      <label>defaults</label>
-      <attribute key="MockClusteringAlgorithm.depth"><value value="1" /></attribute>
-      <attribute key="MockClusteringAlgorithm.labels"><value value="3" /></attribute>
-      <attribute key="MockClusteringAlgorithm.maxClusters"><value value="13" /></attribute>
-    </value-set>
-  </attribute-set>
-</attribute-sets>
\ No newline at end of file
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/carrot2/stoplabels.mt b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/carrot2/stoplabels.mt
deleted file mode 100644
index 71b2dde..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/carrot2/stoplabels.mt
+++ /dev/null
@@ -1 +0,0 @@
-customsolrstoplabel
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/carrot2/stopwords.mt b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/carrot2/stopwords.mt
deleted file mode 100644
index 5dd05b1..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/carrot2/stopwords.mt
+++ /dev/null
@@ -1 +0,0 @@
-customsolrstopword
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/custom/stoplabels.mt b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/custom/stoplabels.mt
deleted file mode 100644
index 6ee28d8..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/custom/stoplabels.mt
+++ /dev/null
@@ -1 +0,0 @@
-customsolrstoplabelcustomdir
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/custom/stopwords.mt b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/custom/stopwords.mt
deleted file mode 100644
index 6e604da..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/clustering/custom/stopwords.mt
+++ /dev/null
@@ -1 +0,0 @@
-customsolrstopwordcustomdir
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/mapping-ISOLatin1Accent.txt b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/mapping-ISOLatin1Accent.txt
deleted file mode 100644
index ede7742..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/mapping-ISOLatin1Accent.txt
+++ /dev/null
@@ -1,246 +0,0 @@
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Syntax:
-#   "source" => "target"
-#     "source".length() > 0 (source cannot be empty.)
-#     "target".length() >= 0 (target can be empty.)
-
-# example:
-#   "À" => "A"
-#   "\u00C0" => "A"
-#   "\u00C0" => "\u0041"
-#   "ß" => "ss"
-#   "\t" => " "
-#   "\n" => ""
-
-# À => A
-"\u00C0" => "A"
-
-# Á => A
-"\u00C1" => "A"
-
-# Â => A
-"\u00C2" => "A"
-
-# Ã => A
-"\u00C3" => "A"
-
-# Ä => A
-"\u00C4" => "A"
-
-# Å => A
-"\u00C5" => "A"
-
-# Æ => AE
-"\u00C6" => "AE"
-
-# Ç => C
-"\u00C7" => "C"
-
-# È => E
-"\u00C8" => "E"
-
-# É => E
-"\u00C9" => "E"
-
-# Ê => E
-"\u00CA" => "E"
-
-# Ë => E
-"\u00CB" => "E"
-
-# Ì => I
-"\u00CC" => "I"
-
-# Í => I
-"\u00CD" => "I"
-
-# Î => I
-"\u00CE" => "I"
-
-# Ï => I
-"\u00CF" => "I"
-
-# IJ => IJ
-"\u0132" => "IJ"
-
-# Ð => D
-"\u00D0" => "D"
-
-# Ñ => N
-"\u00D1" => "N"
-
-# Ò => O
-"\u00D2" => "O"
-
-# Ó => O
-"\u00D3" => "O"
-
-# Ô => O
-"\u00D4" => "O"
-
-# Õ => O
-"\u00D5" => "O"
-
-# Ö => O
-"\u00D6" => "O"
-
-# Ø => O
-"\u00D8" => "O"
-
-# Π=> OE
-"\u0152" => "OE"
-
-# Þ
-"\u00DE" => "TH"
-
-# Ù => U
-"\u00D9" => "U"
-
-# Ú => U
-"\u00DA" => "U"
-
-# Û => U
-"\u00DB" => "U"
-
-# Ü => U
-"\u00DC" => "U"
-
-# Ý => Y
-"\u00DD" => "Y"
-
-# Ÿ => Y
-"\u0178" => "Y"
-
-# à => a
-"\u00E0" => "a"
-
-# á => a
-"\u00E1" => "a"
-
-# â => a
-"\u00E2" => "a"
-
-# ã => a
-"\u00E3" => "a"
-
-# ä => a
-"\u00E4" => "a"
-
-# å => a
-"\u00E5" => "a"
-
-# æ => ae
-"\u00E6" => "ae"
-
-# ç => c
-"\u00E7" => "c"
-
-# è => e
-"\u00E8" => "e"
-
-# é => e
-"\u00E9" => "e"
-
-# ê => e
-"\u00EA" => "e"
-
-# ë => e
-"\u00EB" => "e"
-
-# ì => i
-"\u00EC" => "i"
-
-# í => i
-"\u00ED" => "i"
-
-# î => i
-"\u00EE" => "i"
-
-# ï => i
-"\u00EF" => "i"
-
-# ij => ij
-"\u0133" => "ij"
-
-# ð => d
-"\u00F0" => "d"
-
-# ñ => n
-"\u00F1" => "n"
-
-# ò => o
-"\u00F2" => "o"
-
-# ó => o
-"\u00F3" => "o"
-
-# ô => o
-"\u00F4" => "o"
-
-# õ => o
-"\u00F5" => "o"
-
-# ö => o
-"\u00F6" => "o"
-
-# ø => o
-"\u00F8" => "o"
-
-# œ => oe
-"\u0153" => "oe"
-
-# ß => ss
-"\u00DF" => "ss"
-
-# þ => th
-"\u00FE" => "th"
-
-# ù => u
-"\u00F9" => "u"
-
-# ú => u
-"\u00FA" => "u"
-
-# û => u
-"\u00FB" => "u"
-
-# ü => u
-"\u00FC" => "u"
-
-# ý => y
-"\u00FD" => "y"
-
-# ÿ => y
-"\u00FF" => "y"
-
-# ff => ff
-"\uFB00" => "ff"
-
-# fi => fi
-"\uFB01" => "fi"
-
-# fl => fl
-"\uFB02" => "fl"
-
-# ffi => ffi
-"\uFB03" => "ffi"
-
-# ffl => ffl
-"\uFB04" => "ffl"
-
-# ſt => ft
-"\uFB05" => "ft"
-
-# st => st
-"\uFB06" => "st"
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/protwords.txt b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/protwords.txt
deleted file mode 100644
index 1dfc0ab..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/protwords.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#-----------------------------------------------------------------------
-# Use a protected word file to protect against the stemmer reducing two
-# unrelated words to the same base word.
-
-# Some non-words that normally won't be encountered,
-# just to test that they won't be stemmed.
-dontstems
-zwhacky
-
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml
deleted file mode 100644
index e14c607..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml
+++ /dev/null
@@ -1,350 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!--  
- This is the Solr schema file. This file should be named "schema.xml" and
- should be in the conf directory under the solr home
- (i.e. ./solr/conf/schema.xml by default) 
- or located where the classloader for the Solr webapp can find it.
-
- This example schema is the recommended starting point for users.
- It should be kept correct and concise, usable out-of-the-box.
-
- For more information on how to customize this file, please see
- http://wiki.apache.org/solr/SchemaXml
--->
-
-<schema name="example" version="1.1">
-  <!-- attribute "name" is the name of this schema and is only used for display purposes.
-       Applications should change this to reflect the nature of the search collection.
-       version="1.1" is Solr's version number for the schema syntax and semantics.  It should
-       not normally be changed by applications.
-       1.0: multiValued attribute did not exist, all fields are multiValued by nature
-       1.1: multiValued attribute introduced, false by default -->
-
-
-  <!-- field type definitions. The "name" attribute is
-     just a label to be used by field definitions.  The "class"
-     attribute and any other attributes determine the real
-     behavior of the fieldType.
-       Class names starting with "solr" refer to java classes in the
-     org.apache.solr.analysis package.
-  -->
-
-  <!-- The StrField type is not analyzed, but indexed/stored verbatim.  
-     - StrField and TextField support an optional compressThreshold which
-     limits compression (if enabled in the derived fields) to values which
-     exceed a certain size (in characters).
-  -->
-  <fieldType name="string" class="solr.StrField" sortMissingLast="true" omitNorms="true"/>
-
-  <!-- boolean type: "true" or "false" -->
-  <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true" omitNorms="true"/>
-
-  <!-- The optional sortMissingLast and sortMissingFirst attributes are
-       currently supported on types that are sorted internally as strings.
-     - If sortMissingLast="true", then a sort on this field will cause documents
-       without the field to come after documents with the field,
-       regardless of the requested sort order (asc or desc).
-     - If sortMissingFirst="true", then a sort on this field will cause documents
-       without the field to come before documents with the field,
-       regardless of the requested sort order.
-     - If sortMissingLast="false" and sortMissingFirst="false" (the default),
-       then default lucene sorting will be used which places docs without the
-       field first in an ascending sort and last in a descending sort.
-  -->
-
-  <!--
-    Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.
-  -->
-  <fieldType name="int" class="${solr.tests.IntegerFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="float" class="${solr.tests.FloatFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="long" class="${solr.tests.LongFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="double" class="${solr.tests.DoubleFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
-
-  <!--
-   Numeric field types that index each value at various levels of precision
-   to accelerate range queries when the number of values between the range
-   endpoints is large. See the javadoc for LegacyNumericRangeQuery for internal
-   implementation details.
-  
-   Smaller precisionStep values (specified in bits) will lead to more tokens
-   indexed per value, slightly larger index size, and faster range queries.
-   A precisionStep of 0 disables indexing at different precision levels.
-  -->
-  <fieldType name="tint" class="${solr.tests.IntegerFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tfloat" class="${solr.tests.FloatFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tlong" class="${solr.tests.LongFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tdouble" class="${solr.tests.DoubleFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="8" positionIncrementGap="0"/>
-
-
-  <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
-       is a more restricted form of the canonical representation of dateTime
-       http://www.w3.org/TR/xmlschema-2/#dateTime    
-       The trailing "Z" designates UTC time and is mandatory.
-       Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z
-       All other components are mandatory.
-  
-       Expressions can also be used to denote calculations that should be
-       performed relative to "NOW" to determine the value, ie...
-  
-             NOW/HOUR
-                ... Round to the start of the current hour
-             NOW-1DAY
-                ... Exactly 1 day prior to now
-             NOW/DAY+6MONTHS+3DAYS
-                ... 6 months and 3 days in the future from the start of
-                    the current day
-                    
-       Consult the TrieDateField javadocs for more information.
-    -->
-  <fieldType name="date" class="${solr.tests.DateFieldType}" docValues="${solr.tests.numeric.dv}" sortMissingLast="true" omitNorms="true"/>
-
-
-  <!-- The "RandomSortField" is not used to store or search any
-       data.  You can declare fields of this type in your schema
-       to generate pseudo-random orderings of your docs for sorting
-       purposes.  The ordering is generated based on the field name
-       and the version of the index.  As long as the index version
-       remains unchanged, and the same field name is reused,
-       the ordering of the docs will be consistent.
-       If you want different pseudo-random orderings of documents
-       for the same version of the index, use a dynamicField and
-       change the name.
-   -->
-  <fieldType name="random" class="solr.RandomSortField" indexed="true"/>
-
-  <!-- solr.TextField allows the specification of custom text analyzers
-       specified as a tokenizer and a list of token filters. Different
-       analyzers may be specified for indexing and querying.
-  
-       The optional positionIncrementGap puts space between multiple fields of
-       this type on the same document, with the purpose of preventing false phrase
-       matching across fields.
-  
-       For more info on customizing your analyzer chain, please see
-       http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters
-   -->
-
-  <!-- One can also specify an existing Analyzer class that has a
-       default constructor via the class attribute on the analyzer element
-  <fieldType name="text_greek" class="solr.TextField">
-    <analyzer class="org.apache.lucene.analysis.el.GreekAnalyzer"/>
-  </fieldType>
-  -->
-
-  <!-- A text field that only splits on whitespace for exact matching of words -->
-  <fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer class="solr.MockTokenizerFactory"/>
-    </analyzer>
-  </fieldType>
-
-  <!-- A text field that uses WordDelimiterGraphFilter to enable splitting and matching of
-      words on case-change, alphanumeric boundaries, and non-alphanumeric chars,
-      so that a query of "wifi" or "wi fi" could match a document containing "Wi-Fi".
-      Synonyms and stopwords are customized by external files, and stemming is enabled.
-      Duplicate tokens at the same position (which may result from Stemmed Synonyms or
-      WordDelim parts) are removed.
-      -->
-  <fieldType name="text" class="solr.TextField" positionIncrementGap="100">
-    <analyzer type="index">
-      <tokenizer class="solr.MockTokenizerFactory"/>
-      <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
-      -->
-      <!-- Case insensitive stop word removal.
-      -->
-      <filter class="solr.StopFilterFactory"
-              ignoreCase="true"
-              words="stopwords.txt"
-      />
-      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
-              catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
-      <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-      <filter class="solr.PorterStemFilterFactory"/>
-      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
-      <filter class="solr.FlattenGraphFilterFactory" />
-    </analyzer>
-    <analyzer type="query">
-      <tokenizer class="solr.MockTokenizerFactory"/>
-      <!--<filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>-->
-      <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
-              catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
-      <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-      <filter class="solr.PorterStemFilterFactory"/>
-      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
-    </analyzer>
-  </fieldType>
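As a rough Lucene-level approximation of the chain above (whitespace tokenization, lower-casing and Porter stemming only; the stopword, word-delimiter and synonym stages are omitted), built with CustomAnalyzer:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.custom.CustomAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class TextFieldAnalyzerSketch {
      public static void main(String[] args) throws Exception {
        Analyzer analyzer = CustomAnalyzer.builder()
            .withTokenizer("whitespace")
            .addTokenFilter("lowercase")
            .addTokenFilter("porterStem")
            .build();
        try (TokenStream ts = analyzer.tokenStream("snippet", "Data Mining Techniques")) {
          CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
          ts.reset();
          while (ts.incrementToken()) {
            System.out.println(term);   // prints the stemmed tokens, one per line
          }
          ts.end();
        }
      }
    }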
-
-
-  <!-- Less flexible matching, but fewer false matches.  Probably not ideal for product names,
-       but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
-  <fieldType name="textTight" class="solr.TextField" positionIncrementGap="100">
-    <analyzer type="index">
-      <tokenizer class="solr.MockTokenizerFactory"/>
-      <!--<filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
-      <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
-              catenateNumbers="1" catenateAll="0"/>
-      <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-      <filter class="solr.EnglishMinimalStemFilterFactory"/>
-      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
-      <filter class="solr.FlattenGraphFilterFactory" />
-    </analyzer>
-    <analyzer type="query">
-      <tokenizer class="solr.MockTokenizerFactory"/>
-      <!--<filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
-      <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
-              catenateNumbers="1" catenateAll="0"/>
-      <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-      <filter class="solr.EnglishMinimalStemFilterFactory"/>
-      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
-    </analyzer>
-  </fieldType>
-
-  <!--
-   Setup simple analysis for spell checking
-   -->
-  <fieldType name="textSpell" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
-    </analyzer>
-  </fieldType>
-
-  <!-- This is an example of using the KeywordTokenizer along
-       with various TokenFilterFactories to produce a sortable field
-       that does not include some properties of the source text
-    -->
-  <fieldType name="alphaOnlySort" class="solr.TextField" sortMissingLast="true" omitNorms="true">
-    <analyzer>
-      <!-- KeywordTokenizer does no actual tokenizing, so the entire
-           input string is preserved as a single token
-        -->
-      <tokenizer class="solr.MockTokenizerFactory" pattern="keyword"/>
-      <!-- The LowerCase TokenFilter does what you expect, which can be
-           useful when you want your sorting to be case insensitive
-        -->
-      <filter class="solr.LowerCaseFilterFactory"/>
-      <!-- The TrimFilter removes any leading or trailing whitespace -->
-      <filter class="solr.TrimFilterFactory"/>
-      <!-- The PatternReplaceFilter gives you the flexibility to use
-           Java Regular expression to replace any sequence of characters
-           matching a pattern with an arbitrary replacement string, 
-           which may include back references to portions of the original
-           string matched by the pattern.
-           
-           See the Java Regular Expression documentation for more
-           information on pattern and replacement string syntax.
-           
-           http://docs.oracle.com/javase/8/docs/api/java/util/regex/package-summary.html
-        -->
-      <filter class="solr.PatternReplaceFilterFactory"
-              pattern="([^a-z])" replacement="" replace="all"
-      />
-    </analyzer>
-  </fieldType>
-
-  <!-- since fields of this type are by default not stored or indexed, any data added to 
-       them will be ignored outright 
-   -->
-  <fieldType name="ignored" stored="false" indexed="false" class="solr.StrField"/>
-
-
-  <!-- Valid attributes for fields:
-    name: mandatory - the name for the field
-    type: mandatory - the name of a previously defined type from the <fieldType>s
-    indexed: true if this field should be indexed (searchable or sortable)
-    stored: true if this field should be retrievable
-    multiValued: true if this field may contain multiple values per document
-    omitNorms: (expert) set to true to omit the norms associated with
-      this field (this disables length normalization and index-time
-      boosting for the field, and saves some memory).  Only full-text
-      fields or fields that need an index-time boost need norms.
-    termVectors: [false] set to true to store the term vector for a given field.
-      When using MoreLikeThis, fields used for similarity should be stored for 
-      best performance.
-  -->
-
-  <field name="id" type="string" indexed="true" stored="true" required="true"/>
-  <field name="url" type="string" indexed="true" stored="true" required="false"/>
-  <field name="lang" type="string" indexed="true" stored="true" required="false" multiValued="true"/>
-
-  <field name="title" type="text" indexed="true" stored="true" multiValued="true"/>
-  <field name="heading" type="text" indexed="true" stored="true" multiValued="true"/>
-  <field name="snippet" type="text" indexed="true" stored="true" multiValued="true"/>
-  <field name="body" type="text" indexed="true" stored="true" multiValued="true"/>
-  <!-- catchall field, containing all other searchable text fields (implemented
-       via copyField further on in this schema  -->
-  <field name="text" type="text" indexed="true" stored="false" multiValued="true"/>
-  <!-- Dynamic field definitions.  If a field name is not found, dynamicFields
-       will be used if the name matches any of the patterns.
-       RESTRICTION: the glob-like pattern in the name attribute must have
-       a "*" only at the start or the end.
-       EXAMPLE:  name="*_i" will match any field ending in _i (like myid_i, z_i)
-       Longer patterns will be matched first.  If equal size patterns
-       both match, the first appearing in the schema will be used.  -->
-  <dynamicField name="*_i" type="int" indexed="true" stored="true"/>
-  <dynamicField name="*_s" type="string" indexed="true" stored="true"/>
-  <dynamicField name="*_l" type="long" indexed="true" stored="true"/>
-  <dynamicField name="*_t" type="text" indexed="true" stored="true"/>
-  <dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
-  <dynamicField name="*_f" type="float" indexed="true" stored="true"/>
-  <dynamicField name="*_d" type="double" indexed="true" stored="true"/>
-  <dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
-
-  <dynamicField name="random*" type="random"/>
-
-  <dynamicField name="*_dynamic" type="string" indexed="true" stored="true"/>
-  <dynamicField name="dynamic_*" type="string" indexed="true" stored="true"/>
-
-
-  <!-- uncomment the following to ignore any fields that don't already match an existing 
-       field name or dynamic field, rather than reporting them as an error. 
-       alternately, change the type="ignored" to some other type e.g. "text" if you want 
-       unknown fields indexed and/or stored by default -->
-  <!--dynamicField name="*" type="ignored" /-->
-
-
-  <!-- Field to use to determine and enforce document uniqueness. 
-       Unless this field is marked with required="false", it will be a required field
-    -->
-  <uniqueKey>id</uniqueKey>
-
-  <!-- copyField commands copy one field to another at the time a document
-        is added to the index.  It's used either to index the same field differently,
-        or to add multiple fields to the same field for easier/faster searching.  -->
-  <copyField source="url" dest="text"/>
-  <copyField source="title" dest="text"/>
-  <copyField source="body" dest="text"/>
-  <copyField source="snippet" dest="text"/>
-
-  <!-- dynamic destination -->
-  <copyField source="*_dynamic" dest="dynamic_*"/>
-
-  <copyField source="id" dest="range_facet_l"/>
-
-</schema>
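To make the copyField behaviour above concrete, a small sketch of a document shaped for this schema: "title" and "snippet" are copied into the catch-all "text" field at index time, and a field matching *_dynamic (here val_dynamic, as in the tests below) is also copied to dynamic_val:

    import org.apache.solr.common.SolrInputDocument;

    public class CopyFieldExample {
      public static void main(String[] args) {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", "1");
        doc.addField("url", "http://example.com/");
        doc.addField("title", "Data Mining Introduction");
        doc.addField("snippet", "An overview of knowledge discovery in databases.");
        doc.addField("val_dynamic", "quick red fox");   // copied to dynamic_val by the schema
        System.out.println(doc);
      }
    }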
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/solrconfig.xml b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/solrconfig.xml
deleted file mode 100644
index 24aa2e5..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/solrconfig.xml
+++ /dev/null
@@ -1,440 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<config>
-  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
-
-  <!-- Used to specify an alternate directory to hold all index data
-       other than the default ./data under the Solr home.
-       If replication is in use, this should match the replication configuration. -->
-  <dataDir>${solr.data.dir:}</dataDir>
-
-  <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
-  <schemaFactory class="ClassicIndexSchemaFactory"/>
-
-  <indexConfig>
-    <lockType>single</lockType>
-    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
-  </indexConfig>
-  
-  <!--  Enables JMX if and only if an existing MBeanServer is found; use
-      this if you want to configure JMX through JVM parameters. Remove
-      this to disable exposing Solr configuration and statistics to JMX.
-      
-    If you want to connect to a particular server, specify the agentId
-    e.g. <jmx agentId="myAgent" />
-    
-    If you want to start a new MBeanServer, specify the serviceUrl
-    e.g. <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr" />
-    
-    For more details see http://wiki.apache.org/solr/SolrJmx
-  -->
-  <jmx />
-
-  <!-- the default high-performance update handler -->
-  <updateHandler class="solr.DirectUpdateHandler2">
-
-    <!-- A prefix of "solr." for class names is an alias that
-         causes solr to search appropriate packages, including
-         org.apache.solr.(search|update|request|core|analysis)
-     -->
-
-    <!-- Perform a <commit/> automatically under certain conditions:
-         maxDocs - number of updates since last commit is greater than this
-         maxTime - oldest uncommitted update (in ms) is this long ago
-    <autoCommit> 
-      <maxDocs>10000</maxDocs>
-      <maxTime>1000</maxTime> 
-    </autoCommit>
-    -->
-
-  </updateHandler>
-
-
-  <query>
-    <!-- Maximum number of clauses in a boolean query... can affect
-        range or prefix queries that expand to big boolean
-        queries.  An exception is thrown if exceeded.  -->
-    <maxBooleanClauses>${solr.max.booleanClauses:1024}</maxBooleanClauses>
-
-    
-    <!-- Cache used by SolrIndexSearcher for filters (DocSets),
-         unordered sets of *all* documents that match a query.
-         When a new searcher is opened, its caches may be prepopulated
-         or "autowarmed" using data from caches in the old searcher.
-         autowarmCount is the number of items to prepopulate.  For LRUCache,
-         the autowarmed items will be the most recently accessed items.
-       Parameters:
-         class - the SolrCache implementation (currently only LRUCache)
-         size - the maximum number of entries in the cache
-         initialSize - the initial capacity (number of entries) of
-           the cache.  (see java.util.HashMap)
-         autowarmCount - the number of entries to prepopulate from
-           an old cache.
-         -->
-    <filterCache
-      class="solr.LRUCache"
-      size="512"
-      initialSize="512"
-      autowarmCount="128"/>
-
-   <!-- queryResultCache caches results of searches - ordered lists of
-         document ids (DocList) based on a query, a sort, and the range
-         of documents requested.  -->
-    <queryResultCache
-      class="solr.LRUCache"
-      size="512"
-      initialSize="512"
-      autowarmCount="32"/>
-
-  <!-- documentCache caches Lucene Document objects (the stored fields for each document).
-       Since Lucene internal document ids are transient, this cache will not be autowarmed.  -->
-    <documentCache
-      class="solr.LRUCache"
-      size="512"
-      initialSize="512"
-      autowarmCount="0"/>
-
-    <!-- If true, stored fields that are not requested will be loaded lazily.
-
-    This can result in a significant speed improvement if the usual case is to
-    not load all stored fields, especially if the skipped fields are large compressed
-    text fields.
-    -->
-    <enableLazyFieldLoading>true</enableLazyFieldLoading>
-
-    <!-- Example of a generic cache.  These caches may be accessed by name
-         through SolrIndexSearcher.getCache(), cacheLookup(), and cacheInsert().
-         The purpose is to enable easy caching of user/application level data.
-         The regenerator argument should be specified as an implementation
-         of solr.search.CacheRegenerator if autowarming is desired.  -->
-    <!--
-    <cache name="myUserCache"
-      class="solr.LRUCache"
-      size="4096"
-      initialSize="1024"
-      autowarmCount="1024"
-      regenerator="org.mycompany.mypackage.MyRegenerator"
-      />
-    -->
-
-   <!-- An optimization that attempts to use a filter to satisfy a search.
-         If the requested sort does not include score, then the filterCache
-         will be checked for a filter matching the query. If found, the filter
-         will be used as the source of document ids, and then the sort will be
-         applied to that.
-    <useFilterForSortedQuery>true</useFilterForSortedQuery>
-   -->
-
-   <!-- An optimization for use with the queryResultCache.  When a search
-         is requested, a superset of the requested number of document ids
-         are collected.  For example, if a search for a particular query
-         requests matching documents 10 through 19, and queryResultWindowSize is 50,
-         then documents 0 through 49 will be collected and cached.  Any further
-         requests in that range can be satisfied via the cache.  -->
-    <queryResultWindowSize>50</queryResultWindowSize>
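A small SolrJ sketch of a request that benefits from this window: with queryResultWindowSize set to 50, asking for rows 10-19 causes ids 0-49 to be collected and cached, so later pages inside that window are served from the queryResultCache:

    import org.apache.solr.client.solrj.SolrQuery;

    public class WindowedPagingExample {
      public static void main(String[] args) {
        SolrQuery q = new SolrQuery("data mining");
        q.setStart(10);  // rows 10-19 requested...
        q.setRows(10);   // ...but ids 0-49 end up in the cached window
        System.out.println(q);
      }
    }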
-    
-    <!-- Maximum number of documents to cache for any entry in the
-         queryResultCache. -->
-    <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
-
-    <!-- a newSearcher event is fired whenever a new searcher is being prepared
-         and there is a current searcher handling requests (aka registered). -->
-    <!-- QuerySenderListener takes an array of NamedList and executes a
-         local query request for each NamedList in sequence. -->
-    <listener event="newSearcher" class="solr.QuerySenderListener">
-      <arr name="queries">
-        <lst> <str name="q">solr</str> <str name="start">0</str> <str name="rows">10</str> </lst>
-        <lst> <str name="q">rocks</str> <str name="start">0</str> <str name="rows">10</str> </lst>
-        <lst><str name="q">static newSearcher warming query from solrconfig.xml</str></lst>
-      </arr>
-    </listener>
-
-    <!-- a firstSearcher event is fired whenever a new searcher is being
-         prepared but there is no current registered searcher to handle
-         requests or to gain autowarming data from. -->
-    <listener event="firstSearcher" class="solr.QuerySenderListener">
-      <arr name="queries">
-        <lst> <str name="q">fast_warm</str> <str name="start">0</str> <str name="rows">10</str> </lst>
-        <lst><str name="q">static firstSearcher warming query from solrconfig.xml</str></lst>
-      </arr>
-    </listener>
-
-    <!-- If a search request comes in and there is no current registered searcher,
-         then immediately register the still warming searcher and use it.  If
-         "false" then all requests will block until the first searcher is done
-         warming. -->
-    <useColdSearcher>false</useColdSearcher>
-
-  </query>
-
-  <requestDispatcher>
-    <!--Make sure your system has some authentication before enabling remote streaming!
-    <requestParsers enableRemoteStreaming="false" multipartUploadLimitInKB="-1" />
-    -->
-
-    <!-- Set HTTP caching related parameters (for proxy caches and clients).
-          
-         To get the behaviour of Solr 1.2 (ie: no caching related headers)
-         use the never304="true" option and do not specify a value for
-         <cacheControl>
-    -->
-    <!-- <httpCaching never304="true"> -->
-    <httpCaching lastModifiedFrom="openTime"
-                 etagSeed="Solr">
-       <!-- lastModFrom="openTime" is the default, the Last-Modified value
-            (and validation against If-Modified-Since requests) will all be
-            relative to when the current Searcher was opened.
-            You can change it to lastModFrom="dirLastMod" if you want the
-            value to exactly correspond to when the physical index was last
-            modified.
-               
-            etagSeed="..." is an option you can change to force the ETag
-            header (and validation against If-None-Match requests) to be
-            different even if the index has not changed (ie: when making
-            significant changes to your config file)
-
-            lastModifiedFrom and etagSeed are both ignored if you use the
-            never304="true" option.
-       -->
-       <!-- If you include a <cacheControl> directive, it will be used to
-            generate a Cache-Control header, as well as an Expires header
-            if the value contains "max-age="
-               
-            By default, no Cache-Control header is generated.
-
-            You can use the <cacheControl> option even if you have set
-            never304="true"
-       -->
-       <!-- <cacheControl>max-age=30, public</cacheControl> -->
-    </httpCaching>
-  </requestDispatcher>
-
-  <requestHandler name="/select" class="solr.SearchHandler">
-    <!-- default values for query parameters -->
-     <lst name="defaults">
-       <str name="echoParams">explicit</str>
-       <!-- 
-       <int name="rows">10</int>
-       <str name="fl">*</str>
-       <str name="version">2.1</str>
-        -->
-     </lst>
-    <arr name="last-components">
-      <str>clustering</str>
-    </arr>
-  </requestHandler>
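For reference, the request parameters that switch the clustering last-component on for the /select handler above; the literal names mirror ClusteringComponent.COMPONENT_NAME and ClusteringParams.USE_SEARCH_RESULTS as used by the tests further down:

    import org.apache.solr.common.params.CommonParams;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class ClusteringParamsExample {
      public static void main(String[] args) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.add("clustering", "true");          // enable the clustering component
        params.add("clustering.results", "true");  // cluster the current search results
        params.add(CommonParams.Q, "*:*");
        System.out.println(params);
      }
    }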
-
-
-  <requestHandler name="docClustering" class="solr.SearchHandler">
-    <!-- default values for query parameters -->
-     <lst name="defaults">
-       <str name="echoParams">explicit</str>
-       <!--
-       <int name="rows">10</int>
-       <str name="fl">*</str>
-       <str name="version">2.1</str>
-        -->
-     </lst>
-    <arr name="last-components">
-      <str>doc-clustering</str>
-    </arr>
-  </requestHandler>
-
-  <!-- DisMaxRequestHandler allows easy searching across multiple fields
-       for simple user-entered phrases.  Its implementation is now
-       just the standard SearchHandler with a default query parser
-       of "dismax". 
-       see http://wiki.apache.org/solr/DisMaxRequestHandler
-   -->
-
-
-  <searchComponent class="org.apache.solr.handler.clustering.ClusteringComponent" name="clustering">
-    <!-- Declare an engine -->
-    <lst name="engine">
-      <!-- The engine name; only one engine may be named "default". -->
-      <str name="name">default</str>
-      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
-    </lst>
-    <lst name="engine">
-      <str name="name">stc</str>
-      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
-    </lst>
-    <lst name="engine">
-      <str name="name">mock</str>
-      <str name="carrot.algorithm">org.apache.solr.handler.clustering.carrot2.MockClusteringAlgorithm</str>
-    </lst>
-    <lst name="engine">
-      <str name="name">mock-external-attrs</str>
-      <str name="carrot.algorithm">org.apache.solr.handler.clustering.carrot2.MockClusteringAlgorithm</str>
-      <!-- takes precedence over external XML -->
-      <int name="MockClusteringAlgorithm.labels">4</int>
-    </lst>
-    <lst name="engine">
-      <str name="name">echo</str>
-      <str name="carrot.algorithm">org.apache.solr.handler.clustering.carrot2.EchoClusteringAlgorithm</str>
-    </lst>
-    <lst name="engine">
-      <str name="name">lexical-resource-check</str>
-      <str name="carrot.algorithm">org.apache.solr.handler.clustering.carrot2.LexicalResourcesCheckClusteringAlgorithm</str>
-    </lst>
-    <lst name="engine">
-      <str name="name">lexical-resource-check-custom-resource-dir</str>
-      <str name="carrot.algorithm">org.apache.solr.handler.clustering.carrot2.LexicalResourcesCheckClusteringAlgorithm</str>
-      <str name="carrot.resourcesDir">clustering/custom</str>
-    </lst>
-    <lst name="engine">
-      <str name="name">custom-duplicating-tokenizer</str>
-      <str name="carrot.algorithm">org.apache.solr.handler.clustering.carrot2.EchoTokensClusteringAlgorithm</str>
-      <str name="PreprocessingPipeline.tokenizerFactory">org.apache.solr.handler.clustering.carrot2.DuplicatingTokenizerFactory</str>
-    </lst>
-    <lst name="engine">
-      <str name="name">custom-duplicating-stemmer</str>
-      <str name="carrot.algorithm">org.apache.solr.handler.clustering.carrot2.EchoStemsClusteringAlgorithm</str>
-      <str name="PreprocessingPipeline.stemmerFactory">org.apache.solr.handler.clustering.carrot2.DuplicatingStemmerFactory</str>
-    </lst>
-  </searchComponent>
-
-  <searchComponent class="org.apache.solr.handler.clustering.ClusteringComponent" name="doc-clustering">
-    <!-- Declare an engine -->
-    <lst name="engine">
-      <!-- The engine name; only one engine may be named "default". -->
-      <str name="name">mock</str>
-      <str name="classname">org.apache.solr.handler.clustering.MockDocumentClusteringEngine</str>
-    </lst>
-  </searchComponent>
- 
-
-  <searchComponent class="org.apache.solr.handler.clustering.ClusteringComponent" name="clustering-name-default">
-    <lst name="engine">
-      <str name="name">stc</str>
-      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
-    </lst>
-    <lst name="engine">
-      <str name="name">default</str>
-      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
-    </lst>
-    <lst name="engine">
-      <str name="name">mock</str>
-      <str name="carrot.algorithm">org.apache.solr.handler.clustering.carrot2.MockClusteringAlgorithm</str>
-    </lst>
-  </searchComponent>
-
-  <searchComponent class="org.apache.solr.handler.clustering.ClusteringComponent" name="clustering-name-decl-order">
-    <lst name="engine">
-      <bool name="optional">true</bool>
-      <str name="name">unavailable</str>
-      <str name="carrot.algorithm">org.carrot2.clustering.lingo.UnavailableAlgorithm</str>
-    </lst>
-    <lst name="engine">
-      <str name="name">lingo</str>
-      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
-    </lst>
-    <lst name="engine">
-      <str name="name">stc</str>
-      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
-    </lst>
-    <lst name="engine">
-      <str name="name">mock</str>
-      <str name="carrot.algorithm">org.apache.solr.handler.clustering.carrot2.MockClusteringAlgorithm</str>
-    </lst>
-  </searchComponent>
-  
-  <searchComponent class="org.apache.solr.handler.clustering.ClusteringComponent" name="clustering-name-dups">
-    <lst name="engine">
-      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
-    </lst>
-    <lst name="engine">
-      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
-    </lst>
-    <lst name="engine">
-      <str name="carrot.algorithm">org.apache.solr.handler.clustering.carrot2.MockClusteringAlgorithm</str>
-    </lst>
-  </searchComponent>
-
-  <highlighting>
-   <!-- Configure the standard fragmenter -->
-   <!-- This could most likely be commented out in the "default" case -->
-   <fragmenter name="gap" class="org.apache.solr.highlight.GapFragmenter" default="true">
-    <lst name="defaults">
-     <int name="hl.fragsize">100</int>
-    </lst>
-   </fragmenter>
-
-   <!-- A regular-expression-based fragmenter (e.g., for sentence extraction) -->
-   <fragmenter name="regex" class="org.apache.solr.highlight.RegexFragmenter">
-    <lst name="defaults">
-      <!-- slightly smaller fragsizes work better because of slop -->
-      <int name="hl.fragsize">70</int>
-      <!-- allow 50% slop on fragment sizes -->
-      <float name="hl.regex.slop">0.5</float> 
-      <!-- a basic sentence pattern -->
-      <str name="hl.regex.pattern">[-\w ,/\n\"']{20,200}</str>
-    </lst>
-   </fragmenter>
-   
-   <!-- Configure the standard formatter -->
-   <formatter name="html" class="org.apache.solr.highlight.HtmlFormatter" default="true">
-    <lst name="defaults">
-     <str name="hl.simple.pre"><![CDATA[<em>]]></str>
-     <str name="hl.simple.post"><![CDATA[</em>]]></str>
-    </lst>
-   </formatter>
-  </highlighting>
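A minimal SolrJ sketch of a request that exercises the defaults above (the "gap" fragmenter and "html" formatter are marked default="true", so they need not be selected explicitly); the field name is one from the test schema:

    import org.apache.solr.client.solrj.SolrQuery;

    public class HighlightingExample {
      public static void main(String[] args) {
        SolrQuery q = new SolrQuery("data mining");
        q.setHighlight(true);
        q.addHighlightField("snippet");
        q.setHighlightSnippets(2);
        System.out.println(q);
      }
    }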
-  
-  
-  <!-- queryResponseWriter plugins... query responses will be written using the
-    writer specified by the 'wt' request parameter matching the name of a registered
-    writer.
-    The "default" writer is the default and will be used if 'wt' is not specified 
-    in the request. XMLResponseWriter will be used if nothing is specified here.
-    The json, python, and ruby writers are also available by default.
-
-    <queryResponseWriter name="xml" class="solr.XMLResponseWriter" default="true"/>
-    <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
-    <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
-    <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
-    <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
-    <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
-
-    <queryResponseWriter name="custom" class="com.example.MyResponseWriter"/>
-  -->
-
-  <!-- XSLT response writer transforms the XML output by any xslt file found
-       in Solr's conf/xslt directory.  Changes to xslt files are checked
-       every xsltCacheLifetimeSeconds seconds.
-   -->
-  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
-    <int name="xsltCacheLifetimeSeconds">5</int>
-  </queryResponseWriter> 
-
-
-  <!-- example of registering a query parser
-  <queryParser name="lucene" class="org.apache.solr.search.LuceneQParserPlugin"/>
-  -->
-
-  <!-- example of registering a custom function parser 
-  <valueSourceParser name="myfunc" class="com.mycompany.MyValueSourceParser" />
-  -->
-    
-  <!-- config for the admin interface --> 
-  <admin>
-    <defaultQuery>solr</defaultQuery>
-  </admin>
-
-</config>
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/spellings.txt b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/spellings.txt
deleted file mode 100644
index d7ede6f..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/spellings.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-pizza
-history
\ No newline at end of file
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/stopwords.txt b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/stopwords.txt
deleted file mode 100644
index 54f0b99..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/stopwords.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#-----------------------------------------------------------------------
-# a couple of test stopwords to test that the words are really being
-# configured from this file:
-stopworda
-stopwordb
-
-#Standard english stop words taken from Lucene's StopAnalyzer
-a
-an
-and
-are
-as
-at
-be
-but
-by
-for
-if
-in
-into
-is
-it
-no
-not
-of
-on
-or
-s
-such
-t
-that
-the
-their
-then
-there
-these
-they
-this
-to
-was
-will
-with
-solrownstopword
-
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/synonyms.txt b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/synonyms.txt
deleted file mode 100644
index 26d237a..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/synonyms.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#-----------------------------------------------------------------------
-#some test synonym mappings unlikely to appear in real input text
-aaa => aaaa
-bbb => bbbb1 bbbb2
-ccc => cccc1,cccc2
-a\=>a => b\=>b
-a\,a => b\,b
-fooaaa,baraaa,bazaaa
-
-# Some synonym groups specific to this example
-GB,gib,gigabyte,gigabytes
-MB,mib,megabyte,megabytes
-Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
-#after us won't split it into two words.
-
-# Synonym mappings can be used for spelling correction too
-pixima => pixma
-
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/core.properties b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/core.properties
deleted file mode 100644
index e69de29..0000000
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/solr.xml b/solr/contrib/clustering/src/test-files/clustering/solr/solr.xml
deleted file mode 100644
index 3596eeb..0000000
--- a/solr/contrib/clustering/src/test-files/clustering/solr/solr.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!--
- solr.xml mimicking the old default solr.xml
--->
-
-<solr>
-
-  <str name="coreRootDirectory">cores/</str>
-  <str name="configSetBaseDir"></str>
-
-    <shardHandlerFactory name="shardHandlerFactory" class="HttpShardHandlerFactory">
-     <str name="urlScheme">${urlScheme:}</str>
-     </shardHandlerFactory>
-
-</solr>
diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/AbstractClusteringTestCase.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/AbstractClusteringTestCase.java
deleted file mode 100644
index ca2f424..0000000
--- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/AbstractClusteringTestCase.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering;
-import java.io.File;
-import java.util.Map;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.common.SolrInputDocument;
-import org.junit.BeforeClass;
-
-
-/**
- *
- */
-public abstract class AbstractClusteringTestCase extends SolrTestCaseJ4 {
-  protected static int numberOfDocs = 0;
-
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    File testHome = createTempDir().toFile();
-    FileUtils.copyDirectory(getFile("clustering/solr"), testHome);
-    initCore("solrconfig.xml", "schema.xml", testHome.getAbsolutePath());
-    numberOfDocs = 0;
-    for (String[] doc : DOCUMENTS) {
-      assertNull(h.validateUpdate(adoc("id", Integer.toString(numberOfDocs), "url", doc[0], "title", doc[1], "snippet", doc[2])));
-      numberOfDocs++;
-    }
-    
-    // Add a multi-valued snippet
-    final SolrInputDocument multiValuedSnippet = new SolrInputDocument();
-    multiValuedSnippet.addField("id", numberOfDocs++);
-    multiValuedSnippet.addField("title", "Title");
-    multiValuedSnippet.addField("url", "URL");
-    multiValuedSnippet.addField("snippet", "First value of multi field. Some more text. And still more.");
-    multiValuedSnippet.addField("snippet", "Second value of multi field. Some more text. And still more.");
-    multiValuedSnippet.addField("snippet", "Third value of multi field. Some more text. And still more.");
-    assertNull(h.validateUpdate(adoc(multiValuedSnippet)));
-
-    // Add a document with multi-field title and snippet
-    final SolrInputDocument multiFieldDoc = new SolrInputDocument();
-    multiFieldDoc.addField("id", numberOfDocs++);
-    multiFieldDoc.addField("title", "Title field");
-    multiFieldDoc.addField("heading", "Heading field");
-    multiFieldDoc.addField("url", "URL");
-    multiFieldDoc.addField("snippet", "Snippet field: this is the contents of the snippet field.");
-    multiFieldDoc.addField("body", "Body field: this is the contents of the body field that will get clustered together with snippet.");
-    assertNull(h.validateUpdate(adoc(multiFieldDoc)));
-    
-    // Add a document with one language supported by Carrot2
-    final SolrInputDocument docWithOneSupportedLanguage = new SolrInputDocument();
-    docWithOneSupportedLanguage.addField("id", numberOfDocs++);
-    docWithOneSupportedLanguage.addField("title", "");
-    docWithOneSupportedLanguage.addField("url", "one_supported_language");
-    docWithOneSupportedLanguage.addField("lang", "zh-cn");
-    assertNull(h.validateUpdate(adoc(docWithOneSupportedLanguage)));
-    
-    // Add a document with more languages, one supported by Carrot2
-    final SolrInputDocument docWithOneSupportedLanguageOfMany = new SolrInputDocument();
-    docWithOneSupportedLanguageOfMany.addField("id", numberOfDocs++);
-    docWithOneSupportedLanguageOfMany.addField("url", "one_supported_language_of_many");
-    docWithOneSupportedLanguageOfMany.addField("lang", "zh-tw");
-    docWithOneSupportedLanguageOfMany.addField("lang", "POLISH");
-    docWithOneSupportedLanguageOfMany.addField("lang", "de");
-    assertNull(h.validateUpdate(adoc(docWithOneSupportedLanguageOfMany)));
-    
-    // Add a document with custom (dynamic) fields
-    final SolrInputDocument docWithCustomFields = new SolrInputDocument();
-    docWithCustomFields.addField("id", numberOfDocs++);
-    docWithCustomFields.addField("url", "custom_fields");
-    docWithCustomFields.addField("intfield_i", 10);
-    docWithCustomFields.addField("floatfield_f", 10.5);
-    docWithCustomFields.addField("heading", "first");
-    docWithCustomFields.addField("heading", "second");
-    assertNull(h.validateUpdate(adoc(docWithCustomFields)));
-    assertNull(h.validateUpdate(commit()));
-  }
-
-  /**
-   * Expose package-scope methods from {@link ClusteringComponent} to tests.
-   */
-  protected final Map<String,SearchClusteringEngine> getSearchClusteringEngines(ClusteringComponent comp) {
-    return comp.getSearchClusteringEngines();
-  }
-
-  final static String[][] DOCUMENTS = new String[][]{
-          {"http://en.wikipedia.org/wiki/Data_mining",
-                  "Data Mining - Wikipedia",
-                  "Article about knowledge-discovery in databases (KDD), the practice of automatically searching large stores of data for patterns."},
-
-
-          {"http://en.wikipedia.org/wiki/Datamining",
-                  "Data mining - Wikipedia, the free encyclopedia",
-                  "Data mining is the entire process of applying computer-based methodology, ... Moreover, some data-mining systems such as neural networks are inherently geared ..."},
-
-
-          {"http://www.statsoft.com/textbook/stdatmin.html",
-                  "Electronic Statistics Textbook: Data Mining Techniques",
-                  "Outlines the crucial concepts in data mining, defines the data warehousing process, and offers examples of computational and graphical exploratory data analysis techniques."},
-
-
-          {"http://www.thearling.com/text/dmwhite/dmwhite.htm",
-                  "An Introduction to Data Mining",
-                  "Data mining, the extraction of hidden predictive information from large ... Data mining tools predict future trends and behaviors, allowing businesses to ..."},
-
-
-          {"http://www.anderson.ucla.edu/faculty/jason.frand/teacher/technologies/palace/datamining.htm",
-                  "Data Mining: What is Data Mining?",
-                  "Outlines what knowledge discovery, the process of analyzing data from different perspectives and summarizing it into useful information, can do and how it works."},
-
-
-          {"http://www.spss.com/datamine",
-                  "Data Mining Software, Data Mining Applications and Data Mining Solutions",
-                  "The patterns uncovered using data mining help organizations make better and ... data mining customer ... Data mining applications, on the other hand, embed ..."},
-
-
-          {"http://www.kdnuggets.com/",
-                  "KD Nuggets",
-                  "Newsletter on the data mining and knowledge industries, offering information on data mining, knowledge discovery, text mining, and web mining software, courses, jobs, publications, and meetings."},
-
-
-          {"http://www.answers.com/topic/data-mining",
-                  "data mining: Definition from Answers.com",
-                  "data mining n. The automatic extraction of useful, often previously unknown information from large databases or data ... Data Mining For Investing ..."},
-
-
-          {"http://www.statsoft.com/products/dataminer.htm",
-                  "STATISTICA Data Mining and Predictive Modeling Solutions",
-                  "GRC site-wide menuing system research and development. ... Contact a Data Mining Solutions Consultant. News and Success Stories. Events ..."},
-
-
-          {"http://datamining.typepad.com/",
-                  "Data Mining: Text Mining, Visualization and Social Media",
-                  "Commentary on text mining, data mining, social media and data visualization. ... While mining Twitter data for business and marketing intelligence (trend/buzz ..."},
-
-
-          {"http://www.twocrows.com/",
-                  "Two Crows Corporation",
-                  "Dedicated to the development, marketing, sales and support of tools for knowledge discovery to make data mining accessible and easy to use."},
-
-
-          {"http://www.thearling.com/",
-                  "Thearling.com",
-                  "Kurt Thearling's site dedicated to sharing information about data mining, the automated extraction of hidden predictive information from databases, and other analytic technologies."},
-
-
-          {"http://www.ccsu.edu/datamining/",
-                  "CCSU - Data Mining",
-                  "Offers degrees and certificates in data mining. Allows students to explore cutting-edge data mining techniques and applications: market basket analysis, decision trees, neural networks, machine learning, web mining, and data modeling."},
-
-
-          {"http://www.oracle.com/technology/products/bi/odm",
-                  "Oracle Data Mining",
-                  "Oracle Data Mining Product Center ... New Oracle Data Mining Powers New Social CRM Application (more information ... Mining High-Dimensional Data for ..."},
-
-
-          {"http://databases.about.com/od/datamining/a/datamining.htm",
-                  "Data Mining: An Introduction",
-                  "About.com article on how businesses are discovering new trends and patterns of behavior that previously went unnoticed through data mining, automated statistical analysis techniques."},
-
-
-          {"http://www.dmoz.org/Computers/Software/Databases/Data_Mining/",
-                  "Open Directory - Computers: Software: Databases: Data Mining",
-                  "Data Mining and Knowledge Discovery - A peer-reviewed journal publishing ... Data mining creates information assets that an organization can leverage to ..."},
-
-
-          {"http://www.cs.wisc.edu/dmi/",
-                  "DMI:Data Mining Institute",
-                  "Data Mining Institute at UW-Madison ... The Data Mining Institute (DMI) was started on June 1, 1999 at the Computer ... of the Data Mining Group of Microsoft ..."},
-
-
-          {"http://www.the-data-mine.com/",
-                  "The Data Mine",
-                  "Provides information about data mining also known as knowledge discovery in databases (KDD) or simply knowledge discovery. List software, events, organizations, and people working in data mining."},
-
-
-          {"http://www.statserv.com/datamining.html",
-                  "St@tServ - About Data Mining",
-                  "St@tServ Data Mining page ... Data mining in molecular biology, by Alvis Brazma. Graham Williams page. Knowledge Discovery and Data Mining Resources, ..."},
-
-
-          {"http://ocw.mit.edu/OcwWeb/Sloan-School-of-Management/15-062Data-MiningSpring2003/CourseHome/index.htm",
-                  "MIT OpenCourseWare | Sloan School of Management | 15.062 Data Mining ...",
-                  "Introduces students to a class of methods known as data mining that assists managers in recognizing patterns and making intelligent use of massive amounts of ..."},
-
-
-          {"http://www.pentaho.com/products/data_mining/",
-                  "Pentaho Commercial Open Source Business Intelligence: Data Mining",
-                  "For example, data mining can warn you there's a high probability a specific ... Pentaho Data Mining is differentiated by its open, standards-compliant nature, ..."},
-
-
-          {"http://www.investorhome.com/mining.htm",
-                  "Investor Home - Data Mining",
-                  "Data Mining or Data Snooping is the practice of searching for relationships and ... Data mining involves searching through databases for correlations and patterns ..."},
-
-
-          {"http://www.datamining.com/",
-                  "Predictive Modeling and Predictive Analytics Solutions | Enterprise ...",
-                  "Insightful Enterprise Miner - Enterprise data mining for predictive modeling and predictive analytics."},
-
-
-          {"http://www.sourcewatch.org/index.php?title=Data_mining",
-                  "Data mining - SourceWatch",
-                  "These agencies reported 199 data mining projects, of which 68 ... Office, \"DATA MINING. ... powerful technology known as data mining -- and how, in the ..."},
-
-
-          {"http://www.autonlab.org/tutorials/",
-                  "Statistical Data Mining Tutorials",
-                  "Includes a set of tutorials on many aspects of statistical data mining, including the foundations of probability, the foundations of statistical data analysis, and most of the classic machine learning and data mining algorithms."},
-
-
-          {"http://www.microstrategy.com/data-mining/index.asp",
-                  "Data Mining",
-                  "With MicroStrategy, data mining scoring is fully integrated into mainstream ... The integration of data mining models from other applications is accomplished by ..."},
-
-
-          {"http://www.datamininglab.com/",
-                  "Elder Research",
-                  "Provides consulting and short courses in data mining and pattern discovery patterns in data."},
-
-
-          {"http://www.sqlserverdatamining.com/",
-                  "SQL Server Data Mining > Home",
-                  "SQL Server Data Mining Portal ... Data Mining as an Application Platform (Whitepaper) Creating a Web Cross-sell Application with SQL Server 2005 Data Mining (Article) ..."},
-
-
-          {"http://databases.about.com/cs/datamining/g/dmining.htm",
-                  "Data Mining",
-                  "What is data mining? Find out here! ... Book Review: Data Mining and Statistical Analysis Using SQL. What is Data Mining, and What Does it Have to Do with ..."},
-
-
-          {"http://www.sas.com/technologies/analytics/datamining/index.html",
-                  "Data Mining Software and Text Mining | SAS",
-                  "... raw data to smarter ... Data Mining is an iterative process of creating ... The knowledge gleaned from data and text mining can be used to fuel ..."}
-  };
-}
diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/ClusteringComponentTest.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/ClusteringComponentTest.java
deleted file mode 100644
index e481a11..0000000
--- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/ClusteringComponentTest.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.handler.component.QueryComponent;
-import org.apache.solr.handler.component.SearchComponent;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.QueryCommand;
-import org.apache.solr.search.QueryResult;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- *
- *
- **/
-public class ClusteringComponentTest extends AbstractClusteringTestCase {
-
-  @Before
-  public void doBefore() {
-    clearIndex();
-  }
-
-  @Test
-  public void testComponent() throws Exception {
-    SolrCore core = h.getCore();
-
-    SearchComponent sc = core.getSearchComponent("clustering");
-    assertTrue("sc is null and it shouldn't be", sc != null);
-    ModifiableSolrParams params = new ModifiableSolrParams();
-
-    params.add(ClusteringComponent.COMPONENT_NAME, "true");
-    params.add(CommonParams.Q, "*:*");
-
-    params.add(ClusteringParams.USE_SEARCH_RESULTS, "true");
-
-
-    SolrRequestHandler handler = core.getRequestHandler("/select");
-    SolrQueryResponse rsp;
-    rsp = new SolrQueryResponse();
-    rsp.addResponseHeader(new SimpleOrderedMap<>());
-    SolrQueryRequest req = new LocalSolrQueryRequest(core, params);
-    handler.handleRequest(req, rsp);
-    NamedList<?> values = rsp.getValues();
-    Object clusters = values.get("clusters");
-    //System.out.println("Clusters: " + clusters);
-    assertTrue("clusters is null and it shouldn't be", clusters != null);
-    req.close();
-
-    params = new ModifiableSolrParams();
-    params.add(ClusteringComponent.COMPONENT_NAME, "true");
-    params.add(ClusteringParams.ENGINE_NAME, "mock");
-    params.add(ClusteringParams.USE_COLLECTION, "true");
-    params.add(QueryComponent.COMPONENT_NAME, "false");
-
-    handler = core.getRequestHandler("docClustering");
-
-    rsp = new SolrQueryResponse();
-    rsp.addResponseHeader(new SimpleOrderedMap<>());
-    req = new LocalSolrQueryRequest(core, params);
-    handler.handleRequest(req, rsp);
-    values = rsp.getValues();
-    clusters = values.get("clusters");
-    //System.out.println("Clusters: " + clusters);
-    assertTrue("clusters is null and it shouldn't be", clusters != null);
-    req.close();
-  }
-
-
-  // tests ClusteringComponent.docListToSolrDocumentList
-  @Test
-  public void testDocListConversion() throws Exception {
-    assertU("", adoc("id", "3234", "url", "ignoreme", "val_i", "1",
-        "val_dynamic", "quick red fox"));
-    assertU("", adoc("id", "3235", "url", "ignoreme", "val_i", "1",
-        "val_dynamic", "quick green fox"));
-    assertU("", adoc("id", "3236", "url", "ignoreme", "val_i", "1",
-        "val_dynamic", "quick brown fox"));
-    assertU("", commit());
-
-    h.getCore().withSearcher(srchr -> {
-      QueryResult qr = new QueryResult();
-      QueryCommand cmd = new QueryCommand();
-      cmd.setQuery(new MatchAllDocsQuery());
-      cmd.setLen(10);
-      qr = srchr.search(qr, cmd);
-
-      DocList docs = qr.getDocList();
-      assertEquals("wrong docs size", 3, docs.size());
-      Set<String> fields = new HashSet<>();
-      fields.add("val_dynamic");
-      fields.add("dynamic_val");
-      fields.add("range_facet_l"); // copied from id
-
-      SolrDocumentList list = ClusteringComponent.docListToSolrDocumentList(docs, srchr, fields, null);
-      assertEquals("wrong list Size", docs.size(), list.size());
-      for (SolrDocument document : list) {
-
-        assertTrue("unexpected field", ! document.containsKey("val_i"));
-        assertTrue("unexpected id field", ! document.containsKey("id"));
-
-        assertTrue("original field", document.containsKey("val_dynamic"));
-        assertTrue("dyn copy field", document.containsKey("dynamic_val"));
-        assertTrue("copy field", document.containsKey("range_facet_l"));
-
-        assertNotNull("original field null", document.get("val_dynamic"));
-        assertNotNull("dyn copy field null", document.get("dynamic_val"));
-        assertNotNull("copy field null", document.get("range_facet_l"));
-      }
-      return null;
-    });
-  }
-
-}
diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/DistributedClusteringComponentTest.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/DistributedClusteringComponentTest.java
deleted file mode 100644
index 89d3ddf..0000000
--- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/DistributedClusteringComponentTest.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering;
-
-import org.apache.solr.BaseDistributedSearchTestCase;
-import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
-import org.apache.solr.common.params.CommonParams;
-import org.junit.Test;
-
-@SuppressSSL
-public class DistributedClusteringComponentTest extends
-    BaseDistributedSearchTestCase {
-
-  @Override
-  public String getSolrHome() {
-    return getFile("clustering/solr/collection1").getParent();
-  }
-
-  @Test
-  public void test() throws Exception {
-    del("*:*");
-    int numberOfDocs = 0;
-    for (String[] doc : AbstractClusteringTestCase.DOCUMENTS) {
-      index(id, Integer.toString(numberOfDocs++), "url", doc[0], "title", doc[1], "snippet", doc[2]);
-    }
-    commit();
-    handle.clear();
-    // Only really care about the clusters for this test case, so drop the header and response
-    handle.put("responseHeader", SKIP);
-    handle.put("response", SKIP);
-    query(                                                                                                   
-        ClusteringComponent.COMPONENT_NAME, "true",
-        CommonParams.Q, "*:*",
-        CommonParams.SORT, id + " desc",
-        ClusteringParams.USE_SEARCH_RESULTS, "true");
-    // destroy is not needed because distribTearDown method of base class does it.
-    //destroyServers();
-  }
-
-}
diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java
deleted file mode 100644
index dc98266..0000000
--- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.search.DocSet;
-
-
-/**
- * A mock {@link DocumentClusteringEngine} that returns empty cluster lists.
- * Used only in tests.
- **/
-public class MockDocumentClusteringEngine extends DocumentClusteringEngine {
-  @Override
-  public NamedList<?> cluster(DocSet docs, SolrParams solrParams) {
-    return new NamedList<>();
-  }
-
-  @Override
-  public NamedList<?> cluster(SolrParams solrParams) {
-    return new NamedList<>();
-  }
-  
-  @Override
-  public boolean isAvailable() {
-    return true;
-  }
-}
diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngineTest.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngineTest.java
deleted file mode 100644
index 31be33a..0000000
--- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngineTest.java
+++ /dev/null
@@ -1,542 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.TermQuery;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.handler.clustering.AbstractClusteringTestCase;
-import org.apache.solr.handler.clustering.ClusteringComponent;
-import org.apache.solr.handler.clustering.ClusteringEngine;
-import org.apache.solr.handler.clustering.SearchClusteringEngine;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.search.DocList;
-import org.carrot2.clustering.lingo.LingoClusteringAlgorithm;
-import org.carrot2.core.LanguageCode;
-import org.carrot2.util.attribute.AttributeUtils;
-import org.junit.Test;
-
-/**
- * Tests for the Carrot2-based {@link CarrotClusteringEngine}.
- */
-public class CarrotClusteringEngineTest extends AbstractClusteringTestCase {
-  @Test
-  public void testCarrotLingo() throws Exception {
-    // Note: the expected number of clusters may change after upgrading Carrot2
-    // due to e.g. internal improvements or tuning of Carrot2 clustering.
-    final int expectedNumClusters = 10;
-    checkEngine(getClusteringEngine("default"), expectedNumClusters);
-  }
-
-  @Test
-  public void testProduceSummary() throws Exception {
-    // We'll make two queries, one with and one without summaries, and assert
-    // that document snippets are shorter when the highlighter is in use.
-    final List<NamedList<Object>> noSummaryClusters = clusterWithHighlighting(false, 80);
-    final List<NamedList<Object>> summaryClusters = clusterWithHighlighting(true, 80);
-
-    assertEquals("Equal number of clusters", noSummaryClusters.size(), summaryClusters.size());
-    for (int i = 0; i < noSummaryClusters.size(); i++) {
-      assertTrue("Summary shorter than original document", 
-          getLabels(noSummaryClusters.get(i)).get(1).length() > 
-          getLabels(summaryClusters.get(i)).get(1).length()); 
-    }
-  }
-  
-  @Test
-  public void testSummaryFragSize() throws Exception {
-    // We'll make two queries, one with short summaries and another with longer
-    // summaries, and check that the results differ.
-    final List<NamedList<Object>> shortSummaryClusters = clusterWithHighlighting(true, 30);
-    final List<NamedList<Object>> longSummaryClusters = clusterWithHighlighting(true, 80);
-    
-    assertEquals("Equal number of clusters", shortSummaryClusters.size(), longSummaryClusters.size());
-    for (int i = 0; i < shortSummaryClusters.size(); i++) {
-      assertTrue("Summary shorter than original document", 
-          getLabels(shortSummaryClusters.get(i)).get(1).length() < 
-      getLabels(longSummaryClusters.get(i)).get(1).length()); 
-    }
-  }
-
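-  /**
-   * Clusters documents matching a term query on the "snippet" field with the "echo"
-   * engine, optionally asking the highlighter to produce summaries of the given
-   * fragment size instead of passing complete field values to the algorithm.
-   */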
-  private List<NamedList<Object>> clusterWithHighlighting(
-      boolean enableHighlighting, int fragSize) throws IOException {
-    // Some documents don't have mining in the snippet
-    return clusterWithHighlighting(enableHighlighting, fragSize, 1, "mine", numberOfDocs - 7);
-  }
-
-  private List<NamedList<Object>> clusterWithHighlighting(
-      boolean enableHighlighting, int fragSize, int summarySnippets,
-      String term, int expectedNumDocuments) throws IOException {
-    
-    final TermQuery query = new TermQuery(new Term("snippet", term));
-
-    final ModifiableSolrParams summaryParams = new ModifiableSolrParams();
-    summaryParams.add(CarrotParams.SNIPPET_FIELD_NAME, "snippet");
-    summaryParams.add(CarrotParams.PRODUCE_SUMMARY,
-        Boolean.toString(enableHighlighting));
-    summaryParams
-        .add(CarrotParams.SUMMARY_FRAGSIZE, Integer.toString(fragSize));
-    summaryParams
-        .add(CarrotParams.SUMMARY_SNIPPETS, Integer.toString(summarySnippets));
-    final List<NamedList<Object>> summaryClusters = checkEngine(
-        getClusteringEngine("echo"), expectedNumDocuments,
-        expectedNumDocuments, query, summaryParams);
-    
-    return summaryClusters;
-  }
-
-  @Test
-  public void testCarrotStc() throws Exception {
-    checkEngine(getClusteringEngine("stc"), 3);
-  }
-
-  @Test
-  public void testWithoutSubclusters() throws Exception {
-    checkClusters(checkEngine(getClusteringEngine("mock"), AbstractClusteringTestCase.numberOfDocs),
-        1, 1, 0);
-  }
-
-  @Test
-  public void testExternalXmlAttributesFile() throws Exception {
-    checkClusters(
-        checkEngine(getClusteringEngine("mock-external-attrs"), 13),
-        1, 4, 0);
-  }
-
-  @Test
-  public void testWithSubclusters() throws Exception {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(CarrotParams.OUTPUT_SUB_CLUSTERS, true);
-    checkClusters(checkEngine(getClusteringEngine("mock"), AbstractClusteringTestCase.numberOfDocs), 1, 1, 2);
-  }
-
-  @Test
-  public void testNumDescriptions() throws Exception {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(AttributeUtils.getKey(MockClusteringAlgorithm.class, "labels"), 5);
-    params.set(CarrotParams.NUM_DESCRIPTIONS, 3);
-    checkClusters(checkEngine(getClusteringEngine("mock"), AbstractClusteringTestCase.numberOfDocs,
-            params), 1, 3, 0);
-  }
-
-  @Test
-  public void testClusterScores() throws Exception {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(AttributeUtils.getKey(MockClusteringAlgorithm.class, "depth"), 1);
-    List<NamedList<Object>> clusters = checkEngine(getClusteringEngine("mock"),
-        AbstractClusteringTestCase.numberOfDocs, params);
-    int i = 1;
-    for (NamedList<Object> cluster : clusters) {
-      final Double score = getScore(cluster);
-      assertNotNull(score);
-      assertEquals(0.25 * i++, score, 0);
-    }
-  }
-
-  @Test
-  public void testOtherTopics() throws Exception {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(AttributeUtils.getKey(MockClusteringAlgorithm.class, "depth"), 1);
-    params.set(AttributeUtils.getKey(MockClusteringAlgorithm.class, "otherTopicsModulo"), 2);
-    List<NamedList<Object>> clusters = checkEngine(getClusteringEngine("mock"),
-        AbstractClusteringTestCase.numberOfDocs, params);
-    int i = 1;
-    for (NamedList<Object> cluster : clusters) {
-      assertEquals(i++ % 2 == 0 ? true : null, isOtherTopics(cluster));
-    }
-  }
-
-  @Test
-  public void testCarrotAttributePassing() throws Exception {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(AttributeUtils.getKey(MockClusteringAlgorithm.class, "depth"), 1);
-    params.set(AttributeUtils.getKey(MockClusteringAlgorithm.class, "labels"), 3);
-    checkClusters(checkEngine(getClusteringEngine("mock"), AbstractClusteringTestCase.numberOfDocs,
-            params), 1, 3, 0);
-  }
-
-  @Test
-  public void testLexicalResourcesFromSolrConfigDefaultDir() throws Exception {
-    checkLexicalResourcesFromSolrConfig("lexical-resource-check",
-        "online,customsolrstopword,customsolrstoplabel");
-  }
-
-  @Test
-  public void testLexicalResourcesFromSolrConfigCustomDir() throws Exception {
-    checkLexicalResourcesFromSolrConfig("lexical-resource-check-custom-resource-dir",
-        "online,customsolrstopwordcustomdir,customsolrstoplabelcustomdir");
-  }
-
-  private void checkLexicalResourcesFromSolrConfig(String engineName, String wordsToCheck)
-      throws IOException {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("merge-resources", false);
-    params.set(AttributeUtils.getKey(
-        LexicalResourcesCheckClusteringAlgorithm.class, "wordsToCheck"),
-        wordsToCheck);
-
-    // "customsolrstopword" is in stopwords.en, "customsolrstoplabel" is in
-    // stoplabels.mt, so we're expecting only one cluster with label "online".
-    final List<NamedList<Object>> clusters = checkEngine(
-        getClusteringEngine(engineName), 1, params);
-    assertEquals(getLabels(clusters.get(0)), Collections.singletonList("online"));
-  }
-
-  @Test
-  public void testSolrStopWordsUsedInCarrot2Clustering() throws Exception {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("merge-resources", false);
-    params.set(AttributeUtils.getKey(
-        LexicalResourcesCheckClusteringAlgorithm.class, "wordsToCheck"),
-    "online,solrownstopword");
-
-    // "solrownstopword" is in stopwords.txt, so we're expecting
-    // only one cluster with label "online".
-    final List<NamedList<Object>> clusters = checkEngine(
-        getClusteringEngine("lexical-resource-check"), 1, params);
-    assertEquals(getLabels(clusters.get(0)), Collections.singletonList("online"));
-  }
-
-  @Test
-  public void testSolrStopWordsNotDefinedOnAFieldForClustering() throws Exception {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    // Force string fields to be used for clustering. Does not make sense
-    // in the real world, but does the job in the test.
-    params.set(CarrotParams.TITLE_FIELD_NAME, "url");
-    params.set(CarrotParams.SNIPPET_FIELD_NAME, "url");
-    params.set("merge-resources", false);
-    params.set(AttributeUtils.getKey(
-        LexicalResourcesCheckClusteringAlgorithm.class, "wordsToCheck"),
-    "online,solrownstopword");
-
-    final List<NamedList<Object>> clusters = checkEngine(
-        getClusteringEngine("lexical-resource-check"), 2, params);
-    assertEquals(Collections.singletonList("online"), getLabels(clusters.get(0)));
-    assertEquals(Collections.singletonList("solrownstopword"), getLabels(clusters.get(1)));
-  }
-  
-  @Test
-  public void testHighlightingOfMultiValueField() throws Exception {
-    final String snippetWithoutSummary = getLabels(clusterWithHighlighting(
-        false, 30, 3, "multi", 1).get(0)).get(1);
-    assertTrue("Snippet contains first value", snippetWithoutSummary.contains("First"));
-    assertTrue("Snippet contains second value", snippetWithoutSummary.contains("Second"));
-    assertTrue("Snippet contains third value", snippetWithoutSummary.contains("Third"));
-
-    final String snippetWithSummary = getLabels(clusterWithHighlighting(
-        true, 30, 3, "multi", 1).get(0)).get(1);
-    assertTrue("Snippet with summary shorter than full snippet",
-        snippetWithoutSummary.length() > snippetWithSummary.length());
-    assertTrue("Summary covers first value", snippetWithSummary.contains("First"));
-    assertTrue("Summary covers second value", snippetWithSummary.contains("Second"));
-    assertTrue("Summary covers third value", snippetWithSummary.contains("Third"));
-  }
-  
-  @Test
-  public void testConcatenatingMultipleFields() throws Exception {
-    final ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add(CarrotParams.TITLE_FIELD_NAME, "title,heading");
-    params.add(CarrotParams.SNIPPET_FIELD_NAME, "snippet,body");
-
-    final List<String> labels = getLabels(checkEngine(
-        getClusteringEngine("echo"), 1, 1, new TermQuery(new Term("body",
-            "snippet")), params).get(0));
-    assertTrue("Snippet contains third value", labels.get(0).contains("Title field"));
-    assertTrue("Snippet contains third value", labels.get(0).contains("Heading field"));
-    assertTrue("Snippet contains third value", labels.get(1).contains("Snippet field"));
-    assertTrue("Snippet contains third value", labels.get(1).contains("Body field"));
-  }
-
-  @Test
-  public void testHighlightingMultipleFields() throws Exception {
-    final TermQuery query = new TermQuery(new Term("snippet", "content"));
-
-    final ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add(CarrotParams.TITLE_FIELD_NAME, "title,heading");
-    params.add(CarrotParams.SNIPPET_FIELD_NAME, "snippet,body");
-    params.add(CarrotParams.PRODUCE_SUMMARY, Boolean.toString(false));
-    
-    final String snippetWithoutSummary = getLabels(checkEngine(
-        getClusteringEngine("echo"), 1, 1, query, params).get(0)).get(1);
-    assertTrue("Snippet covers snippet field", snippetWithoutSummary.contains("snippet field"));
-    assertTrue("Snippet covers body field", snippetWithoutSummary.contains("body field"));
-
-    params.set(CarrotParams.PRODUCE_SUMMARY, Boolean.toString(true));
-    params.add(CarrotParams.SUMMARY_FRAGSIZE, Integer.toString(30));
-    params.add(CarrotParams.SUMMARY_SNIPPETS, Integer.toString(2));
-    final String snippetWithSummary = getLabels(checkEngine(
-        getClusteringEngine("echo"), 1, 1, query, params).get(0)).get(1);    
-    assertTrue("Snippet with summary shorter than full snippet",
-        snippetWithoutSummary.length() > snippetWithSummary.length());
-    assertTrue("Snippet covers snippet field", snippetWithSummary.contains("snippet field"));
-    assertTrue("Snippet covers body field", snippetWithSummary.contains("body field"));
-
-  }
-
-  @Test
-  public void testOneCarrot2SupportedLanguage() throws Exception {
-    final ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add(CarrotParams.LANGUAGE_FIELD_NAME, "lang");
-
-    final List<String> labels = getLabels(checkEngine(
-        getClusteringEngine("echo"), 1, 1, new TermQuery(new Term("url",
-            "one_supported_language")), params).get(0));
-    assertEquals(3, labels.size());
-    assertEquals("Correct Carrot2 language", LanguageCode.CHINESE_SIMPLIFIED.name(), labels.get(2));
-  }
-  
-  @Test
-  public void testOneCarrot2SupportedLanguageOfMany() throws Exception {
-    final ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add(CarrotParams.LANGUAGE_FIELD_NAME, "lang");
-    
-    final List<String> labels = getLabels(checkEngine(
-        getClusteringEngine("echo"), 1, 1, new TermQuery(new Term("url",
-            "one_supported_language_of_many")), params).get(0));
-    assertEquals(3, labels.size());
-    assertEquals("Correct Carrot2 language", LanguageCode.GERMAN.name(), labels.get(2));
-  }
-  
-  @Test
-  public void testLanguageCodeMapping() throws Exception {
-    final ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add(CarrotParams.LANGUAGE_FIELD_NAME, "lang");
-    params.add(CarrotParams.LANGUAGE_CODE_MAP, "POLISH:pl");
-    
-    final List<String> labels = getLabels(checkEngine(
-        getClusteringEngine("echo"), 1, 1, new TermQuery(new Term("url",
-            "one_supported_language_of_many")), params).get(0));
-    assertEquals(3, labels.size());
-    assertEquals("Correct Carrot2 language", LanguageCode.POLISH.name(), labels.get(2));
-  }
-  
-  @Test
-  public void testPassingOfCustomFields() throws Exception {
-    final ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add(CarrotParams.CUSTOM_FIELD_NAME, "intfield_i:intfield");
-    params.add(CarrotParams.CUSTOM_FIELD_NAME, "floatfield_f:floatfield");
-    params.add(CarrotParams.CUSTOM_FIELD_NAME, "heading:multi");
-    
-    // Let the echo mock clustering algorithm know which custom field to echo
-    params.add("custom-fields", "intfield,floatfield,multi");
-    
-    final List<String> labels = getLabels(checkEngine(
-        getClusteringEngine("echo"), 1, 1, new TermQuery(new Term("url",
-            "custom_fields")), params).get(0));
-    assertEquals(5, labels.size());
-    assertEquals("Integer field", "10", labels.get(2));
-    assertEquals("Float field", "10.5", labels.get(3));
-    assertEquals("List field", "[first, second]", labels.get(4));
-  }
-
-  @Test
-  public void testCustomTokenizer() throws Exception {
-    final ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add(CarrotParams.TITLE_FIELD_NAME, "title");
-    params.add(CarrotParams.SNIPPET_FIELD_NAME, "snippet");
-
-    final List<String> labels = getLabels(checkEngine(
-        getClusteringEngine("custom-duplicating-tokenizer"), 1, 15, new TermQuery(new Term("title",
-            "field")), params).get(0));
-    
-    // The custom test tokenizer duplicates each token's text
-    assertTrue("First token", labels.get(0).contains("TitleTitle"));
-  }
-  
-  @Test
-  public void testCustomStemmer() throws Exception {
-    final ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add(CarrotParams.TITLE_FIELD_NAME, "title");
-    params.add(CarrotParams.SNIPPET_FIELD_NAME, "snippet");
-    
-    final List<String> labels = getLabels(checkEngine(
-        getClusteringEngine("custom-duplicating-stemmer"), 1, 12, new TermQuery(new Term("title",
-            "field")), params).get(0));
-    
-    // The custom test stemmer duplicates and lowercases each token's text
-    assertTrue("First token", labels.get(0).contains("titletitle"));
-  }
-
-  @Test
-  public void testDefaultEngineOrder() throws Exception {
-    ClusteringComponent comp = (ClusteringComponent) h.getCore().getSearchComponent("clustering-name-default");
-    Map<String,SearchClusteringEngine> engines = getSearchClusteringEngines(comp);
-    assertEquals(
-        Arrays.asList("stc", "default", "mock"),
-        new ArrayList<>(engines.keySet()));
-    assertEquals(
-        LingoClusteringAlgorithm.class,
-        ((CarrotClusteringEngine) engines.get(ClusteringEngine.DEFAULT_ENGINE_NAME)).getClusteringAlgorithmClass());
-  }
-
-  @Test
-  public void testDeclarationEngineOrder() throws Exception {
-    ClusteringComponent comp = (ClusteringComponent) h.getCore().getSearchComponent("clustering-name-decl-order");
-    Map<String,SearchClusteringEngine> engines = getSearchClusteringEngines(comp);
-    assertEquals(
-        Arrays.asList("unavailable", "lingo", "stc", "mock", "default"),
-        new ArrayList<>(engines.keySet()));
-    assertEquals(
-        LingoClusteringAlgorithm.class,
-        ((CarrotClusteringEngine) engines.get(ClusteringEngine.DEFAULT_ENGINE_NAME)).getClusteringAlgorithmClass());
-  }
-
-  @Test
-  public void testDeclarationNameDuplicates() throws Exception {
-    ClusteringComponent comp = (ClusteringComponent) h.getCore().getSearchComponent("clustering-name-dups");
-    Map<String,SearchClusteringEngine> engines = getSearchClusteringEngines(comp);
-    assertEquals(
-        Arrays.asList("", "default"),
-        new ArrayList<>(engines.keySet()));
-    assertEquals(
-        MockClusteringAlgorithm.class,
-        ((CarrotClusteringEngine) engines.get(ClusteringEngine.DEFAULT_ENGINE_NAME)).getClusteringAlgorithmClass());
-  }
-
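-  /**
-   * Returns the engine registered under the given name with the "clustering" search
-   * component of the test solrconfig.xml, failing if it is not present.
-   */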
-  private CarrotClusteringEngine getClusteringEngine(String engineName) {
-    ClusteringComponent comp = (ClusteringComponent) h.getCore()
-            .getSearchComponent("clustering");
-    assertNotNull("clustering component should not be null", comp);
-    CarrotClusteringEngine engine = 
-        (CarrotClusteringEngine) getSearchClusteringEngines(comp).get(engineName);
-    assertNotNull("clustering engine for name: " + engineName
-            + " should not be null", engine);
-    return engine;
-  }
-
-  private List<NamedList<Object>> checkEngine(CarrotClusteringEngine engine,
-                            int expectedNumClusters) throws IOException {
-    return checkEngine(engine, numberOfDocs, expectedNumClusters, new MatchAllDocsQuery(), new ModifiableSolrParams());
-  }
-
-  private List<NamedList<Object>> checkEngine(CarrotClusteringEngine engine,
-                            int expectedNumClusters, SolrParams clusteringParams) throws IOException {
-    return checkEngine(engine, numberOfDocs, expectedNumClusters, new MatchAllDocsQuery(), clusteringParams);
-  }
-
-
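-  /**
-   * Runs the query against the test core, converts the matching DocList to a
-   * SolrDocumentList, clusters it with the given engine and asserts both the number
-   * of matching documents and the number of returned clusters.
-   */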
-  private List<NamedList<Object>> checkEngine(CarrotClusteringEngine engine, int expectedNumDocs,
-                           int expectedNumClusters, Query query, SolrParams clusteringParams) throws IOException {
-    // Get all documents to cluster
-    return h.getCore().withSearcher(searcher -> {
-      DocList docList = searcher.getDocList(query, (Query) null, new Sort(), 0,
-          numberOfDocs);
-      assertEquals("docList size", expectedNumDocs, docList.matches());
-
-      ModifiableSolrParams solrParams = new ModifiableSolrParams();
-      solrParams.add(clusteringParams);
-
-      // Perform clustering
-      LocalSolrQueryRequest req = new LocalSolrQueryRequest(h.getCore(), solrParams);
-      Map<SolrDocument,Integer> docIds = new HashMap<>(docList.size());
-      SolrDocumentList solrDocList = ClusteringComponent.docListToSolrDocumentList( docList, searcher, engine.getFieldsToLoad(req), docIds );
-
-      @SuppressWarnings("unchecked")
-      List<NamedList<Object>> results = (List<NamedList<Object>>) engine.cluster(query, solrDocList, docIds, req);
-      req.close();
-      assertEquals("number of clusters: " + results, expectedNumClusters, results.size());
-      checkClusters(results, false);
-      return results;
-    });
-  }
-
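-  /**
-   * Asserts that every cluster in the results has the expected number of documents,
-   * labels and subclusters.
-   */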
-  private void checkClusters(List<NamedList<Object>> results, int expectedDocCount,
-                             int expectedLabelCount, int expectedSubclusterCount) {
-    for (int i = 0; i < results.size(); i++) {
-      NamedList<Object> cluster = results.get(i);
-      checkCluster(cluster, expectedDocCount, expectedLabelCount,
-              expectedSubclusterCount);
-    }
-  }
-
-  private void checkClusters(List<NamedList<Object>> results, boolean hasSubclusters) {
-    for (int i = 0; i < results.size(); i++) {
-      checkCluster(results.get(i), hasSubclusters);
-    }
-  }
-
-  private void checkCluster(NamedList<Object> cluster, boolean hasSubclusters) {
-    List<Object> docs = getDocs(cluster);
-    assertNotNull("docs is null and it shouldn't be", docs);
-    for (int j = 0; j < docs.size(); j++) {
-      Object id = docs.get(j);
-      assertNotNull("id is null and it shouldn't be", id);
-    }
-
-    List<String> labels = getLabels(cluster);
-    assertNotNull("labels is null but it shouldn't be", labels);
-
-    if (hasSubclusters) {
-      List<NamedList<Object>> subclusters = getSubclusters(cluster);
-      assertNotNull("subclusters is null but it shouldn't be", subclusters);
-    }
-  }
-
-  private void checkCluster(NamedList<Object> cluster, int expectedDocCount,
-                            int expectedLabelCount, int expectedSubclusterCount) {
-    checkCluster(cluster, expectedSubclusterCount > 0);
-    assertEquals("number of docs in cluster", expectedDocCount,
-            getDocs(cluster).size());
-    assertEquals("number of labels in cluster", expectedLabelCount,
-            getLabels(cluster).size());
-
-    if (expectedSubclusterCount > 0) {
-      List<NamedList<Object>> subclusters = getSubclusters(cluster);
-      assertEquals("numClusters", expectedSubclusterCount, subclusters.size());
-      assertEquals("number of subclusters in cluster",
-              expectedSubclusterCount, subclusters.size());
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private List<NamedList<Object>> getSubclusters(NamedList<Object> cluster) {
-    return (List<NamedList<Object>>) cluster.get("clusters");
-  }
-
-  @SuppressWarnings("unchecked")
-  private List<String> getLabels(NamedList<Object> cluster) {
-    return (List<String>) cluster.get("labels");
-  }
-
-  private Double getScore(NamedList<Object> cluster) {
-    return (Double) cluster.get("score");
-  }
-
-  private Boolean isOtherTopics(NamedList<Object> cluster) {
-    return (Boolean)cluster.get("other-topics");
-  }
-
-  @SuppressWarnings("unchecked")
-  private List<Object> getDocs(NamedList<Object> cluster) {
-    return (List<Object>) cluster.get("docs");
-  }
-}
diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/DuplicatingStemmerFactory.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/DuplicatingStemmerFactory.java
deleted file mode 100644
index f1f883d..0000000
--- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/DuplicatingStemmerFactory.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-import org.carrot2.core.LanguageCode;
-import org.carrot2.text.linguistic.IStemmer;
-import org.carrot2.text.linguistic.IStemmerFactory;
-
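-/**
- * A test stemmer factory whose stemmer concatenates each word with itself, making
- * stemmed tokens easy to spot in cluster labels.
- */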
-public class DuplicatingStemmerFactory implements IStemmerFactory {
-  @Override
-  public IStemmer getStemmer(LanguageCode language) {
-    return new IStemmer() {
-      @Override
-      public CharSequence stem(CharSequence word) {
-        return word.toString() + word.toString();
-      }
-    };
-  }
-}
diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/DuplicatingTokenizerFactory.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/DuplicatingTokenizerFactory.java
deleted file mode 100644
index 51820e7..0000000
--- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/DuplicatingTokenizerFactory.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-import java.io.IOException;
-import java.io.Reader;
-
-import org.carrot2.core.LanguageCode;
-import org.carrot2.text.analysis.ExtendedWhitespaceTokenizer;
-import org.carrot2.text.analysis.ITokenizer;
-import org.carrot2.text.linguistic.ITokenizerFactory;
-import org.carrot2.text.util.MutableCharArray;
-
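-/**
- * A test tokenizer factory that duplicates the text of every token produced by the
- * default Carrot2 whitespace tokenizer.
- */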
-public class DuplicatingTokenizerFactory implements ITokenizerFactory {
-  @Override
-  public ITokenizer getTokenizer(LanguageCode language) {
-    return new ITokenizer() {
-      private final ExtendedWhitespaceTokenizer delegate = new ExtendedWhitespaceTokenizer();
-      
-      @Override
-      public void setTermBuffer(MutableCharArray buffer) {
-        delegate.setTermBuffer(buffer);
-        buffer.reset(buffer.toString() + buffer.toString());
-      }
-      
-      @Override
-      public void reset(Reader input) {
-        delegate.reset(input);
-      }
-      
-      @Override
-      public short nextToken() throws IOException {
-        return delegate.nextToken();
-      }
-    };
-  }
-}
diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/EchoClusteringAlgorithm.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/EchoClusteringAlgorithm.java
deleted file mode 100644
index 2c95da3..0000000
--- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/EchoClusteringAlgorithm.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carrot2.core.Cluster;
-import org.carrot2.core.Document;
-import org.carrot2.core.IClusteringAlgorithm;
-import org.carrot2.core.ProcessingComponentBase;
-import org.carrot2.core.ProcessingException;
-import org.carrot2.core.attribute.AttributeNames;
-import org.carrot2.core.attribute.Processing;
-import org.carrot2.util.attribute.Attribute;
-import org.carrot2.util.attribute.Bindable;
-import org.carrot2.util.attribute.Input;
-import org.carrot2.util.attribute.Output;
-
-/**
- * A mock Carrot2 clustering algorithm that outputs input documents as clusters.
- * Useful only in tests.
- */
-@Bindable(prefix = "EchoClusteringAlgorithm")
-public class EchoClusteringAlgorithm extends ProcessingComponentBase implements
-        IClusteringAlgorithm {
-  @Input
-  @Processing
-  @Attribute(key = AttributeNames.DOCUMENTS)
-  public List<Document> documents;
-
-  @Output
-  @Processing
-  @Attribute(key = AttributeNames.CLUSTERS)
-  public List<Cluster> clusters;
-
-  @Input
-  @Processing
-  @Attribute(key = "custom-fields")
-  public String customFields = "";
-
-  
-  @Override
-  public void process() throws ProcessingException {
-    clusters = new ArrayList<>();
-    
-    for (Document document : documents) {
-      final Cluster cluster = new Cluster();
-      cluster.addPhrases(document.getTitle(), document.getSummary());
-      if (document.getLanguage() != null) {
-        cluster.addPhrases(document.getLanguage().name());
-      }
-      for (String field : customFields.split(",")) {
-        Object value = document.getField(field);
-        if (value != null) {
-          cluster.addPhrases(value.toString());
-        }
-      }
-      cluster.addDocuments(document);
-      clusters.add(cluster);
-    }
-  }
-}
diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/EchoStemsClusteringAlgorithm.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/EchoStemsClusteringAlgorithm.java
deleted file mode 100644
index f39fcd9..0000000
--- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/EchoStemsClusteringAlgorithm.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carrot2.core.Cluster;
-import org.carrot2.core.Document;
-import org.carrot2.core.IClusteringAlgorithm;
-import org.carrot2.core.LanguageCode;
-import org.carrot2.core.ProcessingComponentBase;
-import org.carrot2.core.ProcessingException;
-import org.carrot2.core.attribute.AttributeNames;
-import org.carrot2.core.attribute.Processing;
-import org.carrot2.text.preprocessing.PreprocessingContext;
-import org.carrot2.text.preprocessing.PreprocessingContext.AllStems;
-import org.carrot2.text.preprocessing.PreprocessingContext.AllTokens;
-import org.carrot2.text.preprocessing.PreprocessingContext.AllWords;
-import org.carrot2.text.preprocessing.pipeline.BasicPreprocessingPipeline;
-import org.carrot2.util.attribute.Attribute;
-import org.carrot2.util.attribute.Bindable;
-import org.carrot2.util.attribute.Input;
-import org.carrot2.util.attribute.Output;
-
-/**
- * A mock Carrot2 clustering algorithm that outputs stem of each token of each
- * document as a separate cluster. Useful only in tests.
- */
-@Bindable(prefix = "EchoTokensClusteringAlgorithm")
-public class EchoStemsClusteringAlgorithm extends ProcessingComponentBase
-    implements IClusteringAlgorithm {
-  @Input
-  @Processing
-  @Attribute(key = AttributeNames.DOCUMENTS)
-  public List<Document> documents;
-  
-  @Output
-  @Processing
-  @Attribute(key = AttributeNames.CLUSTERS)
-  public List<Cluster> clusters;
-
-  public BasicPreprocessingPipeline preprocessing = new BasicPreprocessingPipeline();
-  
-  @Override
-  public void process() throws ProcessingException {
-    final PreprocessingContext preprocessingContext = preprocessing.preprocess(
-        documents, "", LanguageCode.ENGLISH);
-    final AllTokens allTokens = preprocessingContext.allTokens;
-    final AllWords allWords = preprocessingContext.allWords;
-    final AllStems allStems = preprocessingContext.allStems;
-    clusters = new ArrayList<>();
-    for (int i = 0; i < allTokens.image.length; i++) {
-      if (allTokens.wordIndex[i] >= 0) {
-        clusters.add(new Cluster(new String(
-            allStems.image[allWords.stemIndex[allTokens.wordIndex[i]]])));
-      }
-    }
-  }
-}
diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/EchoTokensClusteringAlgorithm.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/EchoTokensClusteringAlgorithm.java
deleted file mode 100644
index 32e47d8..0000000
--- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/EchoTokensClusteringAlgorithm.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carrot2.core.Cluster;
-import org.carrot2.core.Document;
-import org.carrot2.core.IClusteringAlgorithm;
-import org.carrot2.core.LanguageCode;
-import org.carrot2.core.ProcessingComponentBase;
-import org.carrot2.core.ProcessingException;
-import org.carrot2.core.attribute.AttributeNames;
-import org.carrot2.core.attribute.Processing;
-import org.carrot2.text.preprocessing.PreprocessingContext;
-import org.carrot2.text.preprocessing.pipeline.BasicPreprocessingPipeline;
-import org.carrot2.util.attribute.Attribute;
-import org.carrot2.util.attribute.Bindable;
-import org.carrot2.util.attribute.Input;
-import org.carrot2.util.attribute.Output;
-
-
-/**
- * A mock Carrot2 clustering algorithm that outputs each token of each document
- * as a separate cluster. Useful only in tests.
- */
-@Bindable(prefix = "EchoTokensClusteringAlgorithm")
-public class EchoTokensClusteringAlgorithm extends ProcessingComponentBase
-    implements IClusteringAlgorithm {
-  @Input
-  @Processing
-  @Attribute(key = AttributeNames.DOCUMENTS)
-  public List<Document> documents;
-  
-  @Output
-  @Processing
-  @Attribute(key = AttributeNames.CLUSTERS)
-  public List<Cluster> clusters;
-  
-  public BasicPreprocessingPipeline preprocessing = new BasicPreprocessingPipeline();
-  
-  @Override
-  public void process() throws ProcessingException {
-    final PreprocessingContext preprocessingContext = preprocessing.preprocess(
-        documents, "", LanguageCode.ENGLISH);
-    clusters = new ArrayList<>();
-    for (char[] token : preprocessingContext.allTokens.image) {
-      if (token != null) {
-        clusters.add(new Cluster(new String(token)));
-      }
-    }
-  }
-}
diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/LexicalResourcesCheckClusteringAlgorithm.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/LexicalResourcesCheckClusteringAlgorithm.java
deleted file mode 100644
index 9f69040..0000000
--- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/LexicalResourcesCheckClusteringAlgorithm.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carrot2.core.Cluster;
-import org.carrot2.core.IClusteringAlgorithm;
-import org.carrot2.core.LanguageCode;
-import org.carrot2.core.ProcessingComponentBase;
-import org.carrot2.core.ProcessingException;
-import org.carrot2.core.attribute.AttributeNames;
-import org.carrot2.core.attribute.Processing;
-import org.carrot2.text.linguistic.ILexicalData;
-import org.carrot2.text.preprocessing.pipeline.BasicPreprocessingPipeline;
-import org.carrot2.text.util.MutableCharArray;
-import org.carrot2.util.attribute.Attribute;
-import org.carrot2.util.attribute.Bindable;
-import org.carrot2.util.attribute.Input;
-import org.carrot2.util.attribute.Output;
-
-/**
- * A mock implementation of Carrot2 clustering algorithm for testing whether the
- * customized lexical resource lookup works correctly. This algorithm ignores
- * the input documents and instead for each word from {@link #wordsToCheck}, it
- * outputs a cluster labeled with the word only if the word is neither a stop
- * word nor a stop label.
- */
-@Bindable(prefix = "LexicalResourcesCheckClusteringAlgorithm")
-public class LexicalResourcesCheckClusteringAlgorithm extends
-    ProcessingComponentBase implements IClusteringAlgorithm {
-
-  @Output
-  @Processing
-  @Attribute(key = AttributeNames.CLUSTERS)
-  public List<Cluster> clusters;
-
-  @Input
-  @Processing
-  @Attribute
-  public String wordsToCheck;
-
-  public BasicPreprocessingPipeline preprocessing = new BasicPreprocessingPipeline();
-
-  @Override
-  public void process() throws ProcessingException {
-    clusters = new ArrayList<>();
-    if (wordsToCheck == null) {
-      return;
-    }
-
-    // Test with Maltese so that the English clustering performed in other tests
-    // is not affected by the test stopwords and stoplabels.
-    ILexicalData lexicalData = preprocessing.lexicalDataFactory
-        .getLexicalData(LanguageCode.MALTESE);
-
-    for (String word : wordsToCheck.split(",")) {
-      if (!lexicalData.isCommonWord(new MutableCharArray(word))
-          && !lexicalData.isStopLabel(word)) {
-        clusters.add(new Cluster(word));
-      }
-    }
-  }
-}
diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/MockClusteringAlgorithm.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/MockClusteringAlgorithm.java
deleted file mode 100644
index ba978a5..0000000
--- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/MockClusteringAlgorithm.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.clustering.carrot2;
-import org.carrot2.core.*;
-import org.carrot2.core.attribute.AttributeNames;
-import org.carrot2.core.attribute.Processing;
-import org.carrot2.util.attribute.*;
-import org.carrot2.util.attribute.constraint.IntRange;
-
-import java.util.ArrayList;
-import java.util.List;
-
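-/**
- * A mock Carrot2 clustering algorithm that creates one top-level cluster per input
- * document, each with a chain of subclusters of the configured depth, the configured
- * number of labels and a score of 0.25 * document index. Useful only in tests.
- */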
-@Bindable(prefix = "MockClusteringAlgorithm")
-public class MockClusteringAlgorithm extends ProcessingComponentBase implements
-        IClusteringAlgorithm {
-  @Input
-  @Processing
-  @Attribute(key = AttributeNames.DOCUMENTS)
-  public List<Document> documents;
-
-  @Output
-  @Processing
-  @Attribute(key = AttributeNames.CLUSTERS)
-  public List<Cluster> clusters;
-
-  @Input
-  @Processing
-  @Attribute
-  @IntRange(min = 1, max = 5)
-  public int depth = 2;
-
-  @Input
-  @Processing
-  @Attribute
-  @IntRange(min = 1, max = 5)
-  public int labels = 1;
-
-  @Input
-  @Processing
-  @Attribute
-  @IntRange(min = 0)
-  public int maxClusters = 0;
-
-  @Input
-  @Processing
-  @Attribute
-  public int otherTopicsModulo = 0;
-
-  @Override
-  public void process() throws ProcessingException {
-    clusters = new ArrayList<>();
-    if (documents == null) {
-      return;
-    }
-
-    if (maxClusters > 0) {
-      documents = documents.subList(0, maxClusters);
-    }
-
-    int documentIndex = 1;
-    for (Document document : documents) {
-      StringBuilder label = new StringBuilder("Cluster " + documentIndex);
-      Cluster cluster = createCluster(label.toString(), documentIndex, document);
-      clusters.add(cluster);
-      for (int i = 1; i <= depth; i++) {
-        label.append(".");
-        label.append(i);
-        Cluster newCluster = createCluster(label.toString(), documentIndex, document);
-        cluster.addSubclusters(createCluster(label.toString(), documentIndex, document), newCluster);
-        cluster = newCluster;
-      }
-      documentIndex++;
-    }
-  }
-
-  private Cluster createCluster(String labelBase, int documentIndex, Document... documents) {
-    Cluster cluster = new Cluster();
-    cluster.setScore(documentIndex * 0.25);
-    if (otherTopicsModulo != 0 && documentIndex % otherTopicsModulo == 0) {
-      cluster.setOtherTopics(true);
-    }
-    for (int i = 0; i < labels; i++) {
-      cluster.addPhrases(labelBase + "#" + (i + 1));
-    }
-    cluster.addDocuments(documents);
-    return cluster;
-  }
-}
diff --git a/solr/example/README.txt b/solr/example/README.txt
index 1bfb598..f771037 100644
--- a/solr/example/README.txt
+++ b/solr/example/README.txt
@@ -63,8 +63,8 @@ statements in the solrconfig.xml file to reference plugin jars outside of
 this directory for loading "contrib" plugins via relative paths.  
 
 If you make a copy of this example server and wish to use the 
-ExtractingRequestHandler (SolrCell), DataImportHandler (DIH), the 
-clustering component, or any other modules in "contrib", you will need to 
+ExtractingRequestHandler (SolrCell), DataImportHandler (DIH), 
+or any other modules in "contrib", you will need to 
 copy the required jars or update the paths to those jars in your 
 solrconfig.xml.
 
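Note (a sketch only): loading a contrib module from solrconfig.xml typically uses <lib/>
directives like the following; adjust the relative paths and the jar name regex to your
own layout:

  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />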
diff --git a/solr/example/example-DIH/solr/db/conf/clustering/carrot2/kmeans-attributes.xml b/solr/example/example-DIH/solr/db/conf/clustering/carrot2/kmeans-attributes.xml
deleted file mode 100644
index d802465..0000000
--- a/solr/example/example-DIH/solr/db/conf/clustering/carrot2/kmeans-attributes.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<!-- 
-  Default configuration for the bisecting k-means clustering algorithm.
-  
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="MultilingualClustering.languageAggregationStrategy">
-            <value type="org.carrot2.text.clustering.MultilingualClustering$LanguageAggregationStrategy" value="FLATTEN_MAJOR_LANGUAGE"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
diff --git a/solr/example/example-DIH/solr/db/conf/clustering/carrot2/lingo-attributes.xml b/solr/example/example-DIH/solr/db/conf/clustering/carrot2/lingo-attributes.xml
deleted file mode 100644
index 4bf1360..0000000
--- a/solr/example/example-DIH/solr/db/conf/clustering/carrot2/lingo-attributes.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<!-- 
-  Default configuration for the Lingo clustering algorithm.
-
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <!-- 
-          The language to assume for clustered documents.
-          For a list of allowed values, see: 
-          http://download.carrot2.org/stable/manual/#section.attribute.lingo.MultilingualClustering.defaultLanguage
-          -->
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="LingoClusteringAlgorithm.desiredClusterCountBase">
-            <value type="java.lang.Integer" value="20"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
\ No newline at end of file
diff --git a/solr/example/example-DIH/solr/db/conf/clustering/carrot2/stc-attributes.xml b/solr/example/example-DIH/solr/db/conf/clustering/carrot2/stc-attributes.xml
deleted file mode 100644
index c1bf110..0000000
--- a/solr/example/example-DIH/solr/db/conf/clustering/carrot2/stc-attributes.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<!-- 
-  Default configuration for the STC clustering algorithm.
-
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="MultilingualClustering.languageAggregationStrategy">
-            <value type="org.carrot2.text.clustering.MultilingualClustering$LanguageAggregationStrategy" value="FLATTEN_MAJOR_LANGUAGE"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
diff --git a/solr/example/example-DIH/solr/mail/conf/clustering/carrot2/kmeans-attributes.xml b/solr/example/example-DIH/solr/mail/conf/clustering/carrot2/kmeans-attributes.xml
deleted file mode 100644
index d802465..0000000
--- a/solr/example/example-DIH/solr/mail/conf/clustering/carrot2/kmeans-attributes.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<!-- 
-  Default configuration for the bisecting k-means clustering algorithm.
-  
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="MultilingualClustering.languageAggregationStrategy">
-            <value type="org.carrot2.text.clustering.MultilingualClustering$LanguageAggregationStrategy" value="FLATTEN_MAJOR_LANGUAGE"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
diff --git a/solr/example/example-DIH/solr/mail/conf/clustering/carrot2/lingo-attributes.xml b/solr/example/example-DIH/solr/mail/conf/clustering/carrot2/lingo-attributes.xml
deleted file mode 100644
index 4bf1360..0000000
--- a/solr/example/example-DIH/solr/mail/conf/clustering/carrot2/lingo-attributes.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<!-- 
-  Default configuration for the Lingo clustering algorithm.
-
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <!-- 
-          The language to assume for clustered documents.
-          For a list of allowed values, see: 
-          http://download.carrot2.org/stable/manual/#section.attribute.lingo.MultilingualClustering.defaultLanguage
-          -->
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="LingoClusteringAlgorithm.desiredClusterCountBase">
-            <value type="java.lang.Integer" value="20"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
\ No newline at end of file
diff --git a/solr/example/example-DIH/solr/mail/conf/clustering/carrot2/stc-attributes.xml b/solr/example/example-DIH/solr/mail/conf/clustering/carrot2/stc-attributes.xml
deleted file mode 100644
index c1bf110..0000000
--- a/solr/example/example-DIH/solr/mail/conf/clustering/carrot2/stc-attributes.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<!-- 
-  Default configuration for the STC clustering algorithm.
-
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="MultilingualClustering.languageAggregationStrategy">
-            <value type="org.carrot2.text.clustering.MultilingualClustering$LanguageAggregationStrategy" value="FLATTEN_MAJOR_LANGUAGE"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
diff --git a/solr/example/example-DIH/solr/solr/conf/clustering/carrot2/kmeans-attributes.xml b/solr/example/example-DIH/solr/solr/conf/clustering/carrot2/kmeans-attributes.xml
deleted file mode 100644
index d802465..0000000
--- a/solr/example/example-DIH/solr/solr/conf/clustering/carrot2/kmeans-attributes.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<!-- 
-  Default configuration for the bisecting k-means clustering algorithm.
-  
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="MultilingualClustering.languageAggregationStrategy">
-            <value type="org.carrot2.text.clustering.MultilingualClustering$LanguageAggregationStrategy" value="FLATTEN_MAJOR_LANGUAGE"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
diff --git a/solr/example/example-DIH/solr/solr/conf/clustering/carrot2/lingo-attributes.xml b/solr/example/example-DIH/solr/solr/conf/clustering/carrot2/lingo-attributes.xml
deleted file mode 100644
index 4bf1360..0000000
--- a/solr/example/example-DIH/solr/solr/conf/clustering/carrot2/lingo-attributes.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<!-- 
-  Default configuration for the Lingo clustering algorithm.
-
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <!-- 
-          The language to assume for clustered documents.
-          For a list of allowed values, see: 
-          http://download.carrot2.org/stable/manual/#section.attribute.lingo.MultilingualClustering.defaultLanguage
-          -->
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="LingoClusteringAlgorithm.desiredClusterCountBase">
-            <value type="java.lang.Integer" value="20"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
\ No newline at end of file
diff --git a/solr/example/example-DIH/solr/solr/conf/clustering/carrot2/stc-attributes.xml b/solr/example/example-DIH/solr/solr/conf/clustering/carrot2/stc-attributes.xml
deleted file mode 100644
index c1bf110..0000000
--- a/solr/example/example-DIH/solr/solr/conf/clustering/carrot2/stc-attributes.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<!-- 
-  Default configuration for the STC clustering algorithm.
-
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="MultilingualClustering.languageAggregationStrategy">
-            <value type="org.carrot2.text.clustering.MultilingualClustering$LanguageAggregationStrategy" value="FLATTEN_MAJOR_LANGUAGE"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
diff --git a/solr/example/files/conf/solrconfig.xml b/solr/example/files/conf/solrconfig.xml
index 2c48ac1..7b5b055 100644
--- a/solr/example/files/conf/solrconfig.xml
+++ b/solr/example/files/conf/solrconfig.xml
@@ -75,9 +75,6 @@
   <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
   <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
 
-  <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />
-
   <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
   <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
 
@@ -1031,8 +1028,6 @@
     </arr>
   </requestHandler>
 
-  <!-- Clustering Component. (Omitted here. See the default Solr example for a typical configuration.) -->
-
   <!-- Terms Component
 
        http://wiki.apache.org/solr/TermsComponent
diff --git a/solr/licenses/attributes-binder-1.3.3.jar.sha1 b/solr/licenses/attributes-binder-1.3.3.jar.sha1
deleted file mode 100644
index bad28fb..0000000
--- a/solr/licenses/attributes-binder-1.3.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7f13f63e2e213f6ea38364836408d2dc11f29804
diff --git a/solr/licenses/attributes-binder-LICENSE-ASL.txt b/solr/licenses/attributes-binder-LICENSE-ASL.txt
deleted file mode 100644
index d645695..0000000
--- a/solr/licenses/attributes-binder-LICENSE-ASL.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/solr/licenses/attributes-binder-NOTICE.txt b/solr/licenses/attributes-binder-NOTICE.txt
deleted file mode 100644
index 6ff02dc..0000000
--- a/solr/licenses/attributes-binder-NOTICE.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-=========================================================================
-==     Carrot2 Attributes Binder Notice                                ==
-=========================================================================
-Copyright (C) 2002-2010, Dawid Weiss, Stanislaw Osinski.
-All rights reserved.
-
-This product includes software developed by the Carrot2 Project.
-
-See http://project.carrot2.org/
diff --git a/solr/licenses/carrot2-guava-18.0.jar.sha1 b/solr/licenses/carrot2-guava-18.0.jar.sha1
deleted file mode 100644
index ce50fe3..0000000
--- a/solr/licenses/carrot2-guava-18.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-539317dc171b8c92cca964e87686602800cf19b0
diff --git a/solr/licenses/carrot2-guava-LICENSE-ASL.txt b/solr/licenses/carrot2-guava-LICENSE-ASL.txt
deleted file mode 100644
index d645695..0000000
--- a/solr/licenses/carrot2-guava-LICENSE-ASL.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/solr/licenses/carrot2-guava-NOTICE.txt b/solr/licenses/carrot2-guava-NOTICE.txt
deleted file mode 100644
index e87a0e1..0000000
--- a/solr/licenses/carrot2-guava-NOTICE.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-This product includes software developed by
-Google, Inc. (http://code.google.com/p/guava-libraries/)
-
-Repacked Carrot2 Guava at:
-https://github.com/carrot2/lib-repackaged
\ No newline at end of file
diff --git a/solr/licenses/carrot2-mini-3.16.0.jar.sha1 b/solr/licenses/carrot2-mini-3.16.0.jar.sha1
deleted file mode 100644
index 0b34d73..0000000
--- a/solr/licenses/carrot2-mini-3.16.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6bb27fd0dfe24a5671d9751a943728a54be48ed7
diff --git a/solr/licenses/carrot2-mini-LICENSE-BSD_LIKE.txt b/solr/licenses/carrot2-mini-LICENSE-BSD_LIKE.txt
deleted file mode 100644
index b2a38f3..0000000
--- a/solr/licenses/carrot2-mini-LICENSE-BSD_LIKE.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-
-Carrot2 Project
-
-Copyright (C) 2002-2013, Dawid Weiss, Stanisław Osiński.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-- Redistributions of  source code must  retain the above  copyright notice, this
-  list of conditions and the following disclaimer. 
-
-- Redistributions in binary form must reproduce the above copyright notice, this
-  list of conditions and the following  disclaimer in  the documentation  and/or
-  other materials provided with the distribution. 
-
-- Neither the name  of the Carrot2 Project  nor  the names  of  its contributors 
-  may  be used  to endorse  or  promote  products derived   from  this  software 
-  without specific prior written permission.
-
-- We kindly request that you include in the end-user documentation provided with
-  the redistribution and/or in the software itself an acknowledgement equivalent 
-  to  the  following:  "This product includes  software developed by the Carrot2 
-  Project."
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"  AND
-ANY EXPRESS OR  IMPLIED WARRANTIES, INCLUDING,  BUT NOT LIMITED  TO, THE IMPLIED
-WARRANTIES  OF  MERCHANTABILITY  AND  FITNESS  FOR  A  PARTICULAR  PURPOSE   ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE  FOR
-ANY DIRECT, INDIRECT, INCIDENTAL,  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL  DAMAGES
-(INCLUDING, BUT  NOT LIMITED  TO, PROCUREMENT  OF SUBSTITUTE  GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS;  OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND  ON
-ANY  THEORY  OF  LIABILITY,  WHETHER  IN  CONTRACT,  STRICT  LIABILITY,  OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE)  ARISING IN ANY WAY  OUT OF THE USE  OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/solr/licenses/carrot2-mini-NOTICE.txt b/solr/licenses/carrot2-mini-NOTICE.txt
deleted file mode 100644
index 624dbf3..0000000
--- a/solr/licenses/carrot2-mini-NOTICE.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-=========================================================================
-==     Carrot2 Notice                                                  ==
-=========================================================================
-Copyright (C) 2002-2013, Dawid Weiss, Stanislaw Osinski.
-Portions (C) Contributors listed in "carrot2.CONTRIBUTORS" file.
-All rights reserved.
-
-This product includes software developed by the Carrot2 Project.
-
-See http://project.carrot2.org/
diff --git a/solr/licenses/simple-xml-safe-2.7.1.jar.sha1 b/solr/licenses/simple-xml-safe-2.7.1.jar.sha1
deleted file mode 100644
index 75e4299..0000000
--- a/solr/licenses/simple-xml-safe-2.7.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-045fda5ac6087bc82a209d8cdb73f8d0dbdcfc7b
diff --git a/solr/licenses/simple-xml-safe-LICENSE-ASL.txt b/solr/licenses/simple-xml-safe-LICENSE-ASL.txt
deleted file mode 100644
index 57bc88a..0000000
--- a/solr/licenses/simple-xml-safe-LICENSE-ASL.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
diff --git a/solr/licenses/simple-xml-safe-NOTICE.txt b/solr/licenses/simple-xml-safe-NOTICE.txt
deleted file mode 100644
index 154ac0a..0000000
--- a/solr/licenses/simple-xml-safe-NOTICE.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-This product includes software developed by
-the SimpleXML project (http://simple.sourceforge.net).
\ No newline at end of file
diff --git a/solr/server/README.txt b/solr/server/README.txt
index d4b421c..bfdbc61 100644
--- a/solr/server/README.txt
+++ b/solr/server/README.txt
@@ -93,8 +93,8 @@ statements in the solrconfig.xml file to reference plugin jars outside of
 this directory for loading "contrib" plugins via relative paths.  
 
 If you make a copy of this example server and wish to use the 
-ExtractingRequestHandler (SolrCell), DataImportHandler (DIH), the 
-clustering component, or any other modules in "contrib", you will need to 
+ExtractingRequestHandler (SolrCell), DataImportHandler (DIH), 
+or any other modules in "contrib", you will need to 
 copy the required jars or update the paths to those jars in your 
 solrconfig.xml.
 
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/clustering/carrot2/README.txt b/solr/server/solr/configsets/sample_techproducts_configs/conf/clustering/carrot2/README.txt
deleted file mode 100644
index 3d90ec7..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/clustering/carrot2/README.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-An override location for the clustering algorithms' resources:
-attribute definitions and lexical resources.
-
-A directory from which to load algorithm-specific stop words,
-stop labels and attribute definition XMLs. 
-
-For an overview of Carrot2 lexical resources, see:
-http://download.carrot2.org/head/manual/#chapter.lexical-resources
-
-For an overview of Lingo3G lexical resources, see:
-http://download.carrotsearch.com/lingo3g/manual/#chapter.lexical-resources
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/clustering/carrot2/kmeans-attributes.xml b/solr/server/solr/configsets/sample_techproducts_configs/conf/clustering/carrot2/kmeans-attributes.xml
deleted file mode 100644
index d802465..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/clustering/carrot2/kmeans-attributes.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<!-- 
-  Default configuration for the bisecting k-means clustering algorithm.
-  
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="MultilingualClustering.languageAggregationStrategy">
-            <value type="org.carrot2.text.clustering.MultilingualClustering$LanguageAggregationStrategy" value="FLATTEN_MAJOR_LANGUAGE"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/clustering/carrot2/lingo-attributes.xml b/solr/server/solr/configsets/sample_techproducts_configs/conf/clustering/carrot2/lingo-attributes.xml
deleted file mode 100644
index 4bf1360..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/clustering/carrot2/lingo-attributes.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<!-- 
-  Default configuration for the Lingo clustering algorithm.
-
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <!-- 
-          The language to assume for clustered documents.
-          For a list of allowed values, see: 
-          http://download.carrot2.org/stable/manual/#section.attribute.lingo.MultilingualClustering.defaultLanguage
-          -->
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="LingoClusteringAlgorithm.desiredClusterCountBase">
-            <value type="java.lang.Integer" value="20"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
\ No newline at end of file
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/clustering/carrot2/stc-attributes.xml b/solr/server/solr/configsets/sample_techproducts_configs/conf/clustering/carrot2/stc-attributes.xml
deleted file mode 100644
index c1bf110..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/clustering/carrot2/stc-attributes.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<!-- 
-  Default configuration for the STC clustering algorithm.
-
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="MultilingualClustering.languageAggregationStrategy">
-            <value type="org.carrot2.text.clustering.MultilingualClustering$LanguageAggregationStrategy" value="FLATTEN_MAJOR_LANGUAGE"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml b/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
index 3d8082c..43ab2fd 100644
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
+++ b/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
@@ -75,9 +75,6 @@
   <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
   <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
 
-  <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />
-
   <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
   <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
 
@@ -1230,95 +1227,6 @@
     </arr>
   </requestHandler>
 
-  <!-- Clustering Component
-
-       You'll need to set the solr.clustering.enabled system property
-       when running Solr to enable clustering:
-       -Dsolr.clustering.enabled=true
-
-       https://lucene.apache.org/solr/guide/result-clustering.html
-    -->
-  <searchComponent name="clustering"
-                   enable="${solr.clustering.enabled:false}"
-                   class="solr.clustering.ClusteringComponent" >
-    <!--
-    Declaration of "engines" (clustering algorithms).
-
-    The open source algorithms from Carrot2.org project:
-      * org.carrot2.clustering.lingo.LingoClusteringAlgorithm
-      * org.carrot2.clustering.stc.STCClusteringAlgorithm
-      * org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm
-    See http://project.carrot2.org/algorithms.html for more information.
-
-    Commercial algorithm Lingo3G (needs to be installed separately):
-      * com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm
-    -->
-
-    <lst name="engine">
-      <str name="name">lingo3g</str>
-      <bool name="optional">true</bool>
-      <str name="carrot.algorithm">com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm</str>
-      <str name="carrot.resourcesDir">clustering/carrot2</str>
-    </lst>
-
-    <lst name="engine">
-      <str name="name">lingo</str>
-      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
-      <str name="carrot.resourcesDir">clustering/carrot2</str>
-    </lst>
-
-    <lst name="engine">
-      <str name="name">stc</str>
-      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
-      <str name="carrot.resourcesDir">clustering/carrot2</str>
-    </lst>
-
-    <lst name="engine">
-      <str name="name">kmeans</str>
-      <str name="carrot.algorithm">org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm</str>
-      <str name="carrot.resourcesDir">clustering/carrot2</str>
-    </lst>
-  </searchComponent>
-
-  <!-- A request handler for demonstrating the clustering component.
-       This is meant as an example.
-       In reality you will likely want to add the component to your
-       already specified request handlers.
-    -->
-  <requestHandler name="/clustering"
-                  startup="lazy"
-                  enable="${solr.clustering.enabled:false}"
-                  class="solr.SearchHandler">
-    <lst name="defaults">
-      <bool name="clustering">true</bool>
-      <bool name="clustering.results">true</bool>
-      <!-- Field name with the logical "title" of each document (optional) -->
-      <str name="carrot.title">name</str>
-      <!-- Field name with the logical "URL" of each document (optional) -->
-      <str name="carrot.url">id</str>
-      <!-- Field name with the logical "content" of each document (optional) -->
-      <str name="carrot.snippet">features</str>
-      <!-- Apply the highlighter to the title/content and use the result for clustering. -->
-      <bool name="carrot.produceSummary">true</bool>
-      <!-- the maximum number of labels per cluster -->
-      <!--<int name="carrot.numDescriptions">5</int>-->
-      <!-- produce sub clusters -->
-      <bool name="carrot.outputSubClusters">false</bool>
-
-      <!-- Configure the remaining request handler parameters. -->
-      <str name="defType">edismax</str>
-      <str name="qf">
-        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
-      </str>
-      <str name="q.alt">*:*</str>
-      <str name="rows">100</str>
-      <str name="fl">*,score</str>
-    </lst>
-    <arr name="last-components">
-      <str>clustering</str>
-    </arr>
-  </requestHandler>
-
   <!-- Terms Component
 
        http://wiki.apache.org/solr/TermsComponent
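
Taken together, the removed searchComponent and /clustering request handler above describe how a clustering-enabled request worked in 8.x: the handler's defaults mapped the name, id and features fields onto carrot.title, carrot.url and carrot.snippet and attached the clustering component as a last-component. A minimal SolrJ sketch of issuing such a request, assuming a local instance started with -Dsolr.clustering.enabled=true and a techproducts-style collection (base URL and collection name are assumptions for illustration):

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.response.QueryResponse;

    public class ClusteringQueryExample {
      public static void main(String[] args) throws Exception {
        // Base URL and collection name are illustrative assumptions.
        try (HttpSolrClient client =
                 new HttpSolrClient.Builder("http://localhost:8983/solr/techproducts").build()) {
          SolrQuery query = new SolrQuery("memory");
          // Route to the handler declared above; its defaults already set
          // carrot.title, carrot.url, carrot.snippet, rows and fl.
          query.setRequestHandler("/clustering");
          QueryResponse response = client.query(query);
          // The clustering component appends its output under the "clusters" key.
          System.out.println(response.getResponse().get("clusters"));
        }
      }
    }
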
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/README.txt b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/README.txt
index 5d560ba..08d6a31 100644
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/README.txt
+++ b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/README.txt
@@ -86,8 +86,8 @@ Search Results, General:
                             edit results_list.vm to enable this
 
 
-Search Results, Facets & Clusters:
-  facets.vm               - calls the 4 facet and 1 cluster template
+Search Results, Facets:
+  facets.vm               - calls the four facet templates below
   facet_fields.vm         - display facets based on field values
                             e.g.: fields specified by &facet.field=
   facet_queries.vm        - display facets based on specific facet queries
@@ -96,6 +96,3 @@ Search Results, Facets & Clusters:
                             e.g.: ranges specified by &facet.range=
   facet_pivot.vm          - display pivot based facets
                             e.g.: facets specified by &facet.pivot=
-  cluster.vm              - if clustering is available
-                            then call cluster_results.vm
-  cluster_results.vm      - actual rendering of clusters
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/cluster.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/cluster.vm
deleted file mode 100644
index 09885f3..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/cluster.vm
+++ /dev/null
@@ -1,19 +0,0 @@
-#**
- *  Check if Clustering is Enabled and then
- *  call cluster_results.vm
- *#
-
-<h2 #annTitle("Clusters generated by Carrot2 using the /clustering RequestHandler")>
-  Clusters
-</h2>
-
-## Div tag has placeholder text by default
-<div id="clusters">
-  Run Solr with option -Dsolr.clustering.enabled=true to see clustered search results.
-</div>
-
-## Replace the div content *if* Carrot^2 is available
-<script type="text/javascript">
-  $('#clusters').load("#url_for_solr/clustering#lens",
-    {'wt':'velocity', 'v.template':"cluster_results"});
-</script>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/cluster_results.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/cluster_results.vm
deleted file mode 100644
index 204480d..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/cluster_results.vm
+++ /dev/null
@@ -1,31 +0,0 @@
-#**
- *  Actual rendering of Clusters
- *#
-
-## For each cluster
-#foreach ($clusters in $response.response.clusters)
-
-  #set($labels = $clusters.get('labels'))
-  #set($docs = $clusters.get('docs'))
-
-  ## This Cluster's Heading
-  <h3>
-    #foreach ($label in $labels)
-      ## Keep the following line together to prevent
-      ## a space appearing before each comma
-      $label#if( $foreach.hasNext ),#end
-    #end
-  </h3>
-
-  ## This Cluster's Documents
-  <ol>
-    ## For each doc in this cluster
-    #foreach ($cluDoc in $docs)
-      <li>
-        <a href="#url_for_home?q=id:$cluDoc">
-          $cluDoc</a>
-      </li>
-    #end
-  </ol>
-
-#end   ## end for each Cluster
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facets.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facets.vm
index 55d40c9..2e6504c 100644
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facets.vm
+++ b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facets.vm
@@ -7,4 +7,3 @@
 #parse('facet_queries.vm')
 #parse('facet_ranges.vm')
 #parse('facet_pivot.vm')
-#parse('cluster.vm')
diff --git a/solr/solr-ref-guide/src/overview-of-searching-in-solr.adoc b/solr/solr-ref-guide/src/overview-of-searching-in-solr.adoc
index 4de2b39..19fbfc0 100644
--- a/solr/solr-ref-guide/src/overview-of-searching-in-solr.adoc
+++ b/solr/solr-ref-guide/src/overview-of-searching-in-solr.adoc
@@ -38,7 +38,7 @@ A search query can request that certain terms be highlighted in the search respo
 
 Search responses can also be configured to include *snippets* (document excerpts) featuring highlighted text. Popular search engines such as Google and Yahoo! return snippets in their search results: 3-4 lines of text offering a description of a search result.
 
-To help users zero in on the content they're looking for, Solr supports two special ways of grouping search results to aid further exploration: faceting and clustering.
+To help users zero in on the content they're looking for, Solr supports faceting, a special way of grouping search results to aid further exploration.
 
 <<faceting.adoc#faceting,*Faceting*>> is the arrangement of search results into categories (which are based on indexed terms). Within each category, Solr reports on the number of hits for each relevant term, which is called a facet constraint. Faceting makes it easy for users to explore search results on sites such as movie sites and product review sites, where there are many categories and many items within a category.
 
@@ -48,8 +48,6 @@ image::images/overview-of-searching-in-solr/worddav88969a784fb8a63d8c46e9c043f5f
 
 Faceting makes use of fields defined when the search applications were indexed. In the example above, these fields include categories of information that are useful for describing digital cameras: manufacturer, resolution, and zoom range.
 
-*Clustering* groups search results by similarities discovered when a search is executed, rather than when content is indexed. The results of clustering often lack the neat hierarchical organization found in faceted search results, but clustering can be useful nonetheless. It can reveal unexpected commonalities among search results, and it can help users rule out content that isn't pertinent to what they're really searching for.
-
 Solr also supports a feature called <<morelikethis.adoc#morelikethis,MoreLikeThis>>, which enables users to submit new queries that focus on particular terms returned in an earlier query. MoreLikeThis queries can make use of faceting to provide additional aid to users.
 
 A Solr component called a <<response-writers.adoc#response-writers,*response writer*>> manages the final presentation of the query response. Solr includes a variety of response writers, including an <<response-writers.adoc#standard-xml-response-writer,XML Response Writer>> and a <<response-writers.adoc#json-response-writer,JSON Response Writer>>.
diff --git a/solr/solr-ref-guide/src/requesthandlers-and-searchcomponents-in-solrconfig.adoc b/solr/solr-ref-guide/src/requesthandlers-and-searchcomponents-in-solrconfig.adoc
index 52e2788..b4b2aeb 100644
--- a/solr/solr-ref-guide/src/requesthandlers-and-searchcomponents-in-solrconfig.adoc
+++ b/solr/solr-ref-guide/src/requesthandlers-and-searchcomponents-in-solrconfig.adoc
@@ -170,7 +170,6 @@ Many of the other useful components are described in sections of this Guide for
 * `QueryElevationComponent`, described in the section <<the-query-elevation-component.adoc#the-query-elevation-component,The Query Elevation Component>>.
 * `TermsComponent`, described in the section <<the-terms-component.adoc#the-terms-component,The Terms Component>>.
 * `RealTimeGetComponent`, described in the section <<realtime-get.adoc#realtime-get,RealTime Get>>.
-* `ClusteringComponent`, described in the section <<result-clustering.adoc#result-clustering,Result Clustering>>.
 * `SuggestComponent`, described in the section <<suggester.adoc#suggester,Suggester>>.
 * `AnalyticsComponent`, described in the section <<analytics.adoc#analytics,Analytics>>.
 
diff --git a/solr/solr-ref-guide/src/result-clustering.adoc b/solr/solr-ref-guide/src/result-clustering.adoc
deleted file mode 100644
index 0ec2eba..0000000
--- a/solr/solr-ref-guide/src/result-clustering.adoc
+++ /dev/null
@@ -1,346 +0,0 @@
-= Result Clustering
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-The *clustering* (or *cluster analysis*) plugin attempts to automatically discover groups of related search hits (documents) and assign human-readable labels to these groups.
-
-By default in Solr, the clustering algorithm is applied to the search results of each individual query -- this is called _on-line_ clustering. While Solr contains an extension for full-index clustering (_off-line_ clustering), this section focuses on on-line clustering only.
-
-Clusters discovered for a given query can be perceived as _dynamic facets_. This is beneficial when regular faceting is difficult (field values are not known in advance) or when the queries are exploratory in nature. Take a look at the https://search.carrot2.org/#/search/web/solr/treemap[Carrot2] project's demo page to see an example of search results clustering in action (the groups in the visualization have been discovered automatically in search results to the right, there is no exter [...]
-
-image::images/result-clustering/carrot2.png[image,width=900]
-
-The query issued to the system was _Solr_. It seems clear that faceting could not yield a similar set of groups, although the goals of both techniques are similar—to let the user explore the set of search results and either rephrase the query or narrow the focus to a subset of current documents. Clustering is also similar to <<result-grouping.adoc#result-grouping,Result Grouping>> in that it can help to look deeper into search results, beyond the top few hits.
-
-== Clustering Concepts
-
-Each *document* passed to the clustering component is composed of several logical parts:
-
-* a unique identifier,
-* origin URL,
-* the title,
-* the main content,
-* a language code of the title and content.
-
-The identifier part is mandatory; everything else is optional, but at least one of the text fields (title or content) is required to make the clustering process reasonable. It is important to remember that logical document parts must be mapped to a particular schema and its fields. The content (text) for clustering can be sourced from either a stored text field or context-filtered using a highlighter; all these options are explained below in the <<Clustering Configuration,configurati [...]
-
-A *clustering algorithm* is the actual logic (implementation) that discovers relationships among the documents in the search result and forms human-readable cluster labels. Depending on the choice of the algorithm, the clusters may (and probably will) vary. Solr comes with several algorithms implemented in the open source http://carrot2.org[Carrot2] project; commercial alternatives also exist.
-
-== Clustering Quick Start Example
-
-The "```techproducts```" example included with Solr is pre-configured with all the necessary components for result clustering -- but they are disabled by default.
-
-To enable the clustering component contrib and a dedicated search handler configured to use it, specify a JVM System Property when running the example:
-
-[source,bash]
-----
-bin/solr start -e techproducts -Dsolr.clustering.enabled=true
-----
-
-You can now try out the clustering handler by opening the following URL in a browser:
-
-`\http://localhost:8983/solr/techproducts/clustering?q=\*:*&rows=100&wt=xml`
-
-The output XML should include search hits and an array of automatically discovered clusters at the end, resembling the output shown here:
-
-[source,xml]
-----
-<response>
-  <lst name="responseHeader">
-    <int name="status">0</int>
-    <int name="QTime">299</int>
-  </lst>
-  <result name="response" numFound="32" start="0" maxScore="1.0">
-    <doc>
-      <str name="id">GB18030TEST</str>
-      <str name="name">Test with some GB18030 encoded characters</str>
-      <arr name="features">
-        <str>No accents here</str>
-        <str>这是一个功能</str>
-        <str>This is a feature (translated)</str>
-        <str>这份文件是很有光泽</str>
-        <str>This document is very shiny (translated)</str>
-      </arr>
-      <float name="price">0.0</float>
-      <str name="price_c">0,USD</str>
-      <bool name="inStock">true</bool>
-      <long name="_version_">1448955395025403904</long>
-      <float name="score">1.0</float>
-    </doc>
-
-    <!-- more search hits, omitted -->
-  </result>
-
-  <arr name="clusters">
-    <lst>
-      <arr name="labels">
-        <str>DDR</str>
-      </arr>
-      <double name="score">3.9599865057283354</double>
-      <arr name="docs">
-        <str>TWINX2048-3200PRO</str>
-        <str>VS1GB400C3</str>
-        <str>VDBDB1A16</str>
-      </arr>
-    </lst>
-    <lst>
-      <arr name="labels">
-        <str>iPod</str>
-      </arr>
-      <double name="score">11.959228467119022</double>
-      <arr name="docs">
-        <str>F8V7067-APL-KIT</str>
-        <str>IW-02</str>
-        <str>MA147LL/A</str>
-      </arr>
-    </lst>
-
-    <!-- More clusters here, omitted. -->
-
-    <lst>
-      <arr name="labels">
-        <str>Other Topics</str>
-      </arr>
-      <double name="score">0.0</double>
-      <bool name="other-topics">true</bool>
-      <arr name="docs">
-        <str>adata</str>
-        <str>apple</str>
-        <str>asus</str>
-        <str>ati</str>
-        <!-- other unassigned document IDs here -->
-      </arr>
-    </lst>
-  </arr>
-</response>
-----
-
-There were a few clusters discovered for this query (`\*:*`), separating search hits into various categories: DDR, iPod, Hard Drive, etc. Each cluster has a label and score that indicates the "goodness" of the cluster. The score is algorithm-specific and is meaningful only in relation to the scores of other clusters in the same set. In other words, if cluster _A_ has a higher score than cluster _B_, cluster _A_ should be of better quality (have a better label and/or more coherent documen [...]
-
-Depending on the quality of input documents, some clusters may not make much sense. Some documents may be left out and not be clustered at all; these will be assigned to the synthetic _Other Topics_ group, marked with the `other-topics` property set to `true` (see the XML dump above for an example). The score of the other topics group is zero.
-
-== Installing the Clustering Contrib
-
-The clustering contrib extension requires `dist/solr-clustering-*.jar` and all JARs under `contrib/clustering/lib`.
-
-== Clustering Configuration
-
-=== Declaration of the Clustering Search Component and Request Handler
-
-The clustering extension is a search component and must be declared in `solrconfig.xml`. Such a component can then be appended to a request handler as the last component in the chain (because it requires search results, which must first be fetched by the search component).
-
-An example configuration could look as shown below.
-
-. Include the required contrib JARs. Note that by default paths are relative to the Solr core so they may need adjustments to your configuration, or an explicit specification of the `$solr.install.dir`.
-+
-[source,xml]
-----
-<lib dir="${solr.install.dir:../../..}/contrib/clustering/lib/" regex=".*\.jar" />
-<lib dir="${solr.install.dir:../../..}/dist/" regex="solr-clustering-\d.*\.jar" />
-----
-. Declaration of the search component. Each component can also declare multiple clustering pipelines ("engines"), which can be selected at runtime by passing `clustering.engine=(engine name)` URL parameter.
-+
-[source,xml]
-----
-<searchComponent name="clustering" class="solr.clustering.ClusteringComponent">
-  <!-- Lingo clustering algorithm -->
-  <lst name="engine">
-    <str name="name">lingo</str>
-    <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
-  </lst>
-
-  <!-- An example definition for the STC clustering algorithm. -->
-  <lst name="engine">
-    <str name="name">stc</str>
-    <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
-  </lst>
-</searchComponent>
-----
-. A request handler to which we append the clustering component declared above.
-+
-[source,xml]
-----
-<requestHandler name="/clustering"
-                class="solr.SearchHandler">
-  <lst name="defaults">
-    <bool name="clustering">true</bool>
-    <bool name="clustering.results">true</bool>
-
-    <!-- Logical field to physical field mapping. -->
-    <str name="carrot.url">id</str>
-    <str name="carrot.title">doctitle</str>
-    <str name="carrot.snippet">content</str>
-
-    <!-- Configure any other request handler parameters. We will cluster the
-         top 100 search results so bump up the 'rows' parameter. -->
-    <str name="rows">100</str>
-    <str name="fl">*,score</str>
-  </lst>
-
-  <!-- Append clustering at the end of the list of search components. -->
-  <arr name="last-components">
-    <str>clustering</str>
-  </arr>
-</requestHandler>
-----
-
-=== Configuration Parameters of the Clustering Component
-
-The following parameters are available for each clustering engine or for the entire clustering component, depending on where they are declared.
-
-`clustering`::
-When `true`, the clustering component is enabled.
-
-`clustering.engine`::
-Declares which clustering engine to use. If not present, the first declared engine will become the default one.
-
-`clustering.results`::
-When `true`, the component will perform clustering of search results (this should be enabled).
-
-`clustering.collection`::
-When `true`, the component will perform clustering of the whole document index (this section does not cover full-index clustering).
-
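For illustration only (this request is not part of the original guide), the same parameters can also be supplied per request. The handler path and the `stc` engine below are the ones declared in the example configuration above; the query itself is arbitrary:

[source,text]
http://localhost:8983/solr/techproducts/clustering?q=*:*&rows=100&clustering=true&clustering.results=true&clustering.engine=stc
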
-At the engine declaration level, the following parameters are supported.
-
-`carrot.algorithm`::
-The algorithm class.
-
-`carrot.resourcesDir`::
-Algorithm-specific resources and configuration files (stop words, other lexical resources, default settings). By default this points to `conf/clustering/carrot2/`.
-
-`carrot.outputSubClusters`::
-If `true` and the algorithm supports hierarchical clustering, sub-clusters will also be emitted. Default value: true.
-
-`carrot.numDescriptions`::
-Maximum number of per-cluster labels to return (if the algorithm assigns more than one label to a cluster).
-
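As a sketch only, a single engine declaration combining the attributes above might look as follows. The concrete values are illustrative, not recommendations; `carrot.outputSubClusters` and `carrot.numDescriptions` echo the techproducts defaults removed by this commit:

[source,xml]
----
<lst name="engine">
  <str name="name">lingo</str>
  <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
  <!-- Illustrative resource location; the default is conf/clustering/carrot2/. -->
  <str name="carrot.resourcesDir">clustering/carrot2</str>
  <!-- Emit flat clusters only, with at most 5 labels per cluster. -->
  <bool name="carrot.outputSubClusters">false</bool>
  <int name="carrot.numDescriptions">5</int>
</lst>
----
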
-The `carrot.algorithm` parameter should contain a fully qualified class name of an algorithm supported by the http://project.carrot2.org[Carrot2] framework. Currently, the following algorithms are available:
-
-* `org.carrot2.clustering.lingo.LingoClusteringAlgorithm` (open source)
-* `org.carrot2.clustering.stc.STCClusteringAlgorithm` (open source)
-* `org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm` (open source)
-* `com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm` (commercial)
-
-For a comparison of characteristics of these algorithms see the following links:
-
-* http://doc.carrot2.org/#section.advanced-topics.fine-tuning.choosing-algorithm
-* http://project.carrot2.org/algorithms.html
-* http://carrotsearch.com/lingo3g-comparison.html
-
-The question of which algorithm to choose depends on the amount of traffic (STC is faster than Lingo but arguably produces less intuitive clusters; Lingo3G is the fastest algorithm but is not free or open source), the expected result (Lingo3G provides hierarchical clusters, while Lingo and STC provide flat clusters), and the input data (each algorithm will cluster the input slightly differently). There is no single answer as to which algorithm is "the best".
-
-=== Contextual and Full Field Clustering
-
-The clustering engine can apply clustering to the full content of (stored) fields or it can run an internal highlighter pass to extract context-snippets before clustering. Highlighting is recommended when the logical snippet field contains a lot of content (this would affect clustering performance). Highlighting can also increase the quality of clustering because the content passed to the algorithm will be more focused around the query (it will be query-specific context). The following p [...]
-
-`carrot.produceSummary`::
-When `true` the clustering component will run a highlighter pass on the content of logical fields pointed to by `carrot.title` and `carrot.snippet`. Otherwise full content of those fields will be clustered.
-
-`carrot.fragSize`::
-The size, in characters, of the snippets (aka fragments) created by the highlighter. If not specified, the default highlighting fragsize (`hl.fragsize`) will be used.
-
-`carrot.summarySnippets`:: The number of summary snippets to generate for clustering. If not specified, the default highlighting snippet count (`hl.snippets`) will be used.
-
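A sketch of request handler defaults combining these options. The title/snippet mappings repeat the example configuration shown earlier; the `carrot.fragSize` and `carrot.summarySnippets` values are purely illustrative (when unset, `hl.fragsize` and `hl.snippets` apply):

[source,xml]
----
<lst name="defaults">
  <bool name="clustering">true</bool>
  <str name="carrot.title">doctitle</str>
  <str name="carrot.snippet">content</str>
  <!-- Cluster highlighter-produced snippets instead of full field content. -->
  <bool name="carrot.produceSummary">true</bool>
  <!-- Illustrative values only. -->
  <int name="carrot.fragSize">100</int>
  <int name="carrot.summarySnippets">2</int>
</lst>
----
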
-=== Logical to Document Field Mapping
-
-As already mentioned in <<Clustering Concepts>>, the clustering component clusters "documents" consisting of logical parts that need to be mapped onto physical schema of data stored in Solr. The field mapping attributes provide a connection between fields and logical document parts. Note that the content of title and snippet fields must be *stored* so that it can be retrieved at search time.
-
-`carrot.title`::
-The field (alternatively comma- or space-separated list of fields) that should be mapped to the logical document's title. The clustering algorithms typically give more weight to the content of the title field compared to the content (snippet). For best results, the field should contain concise, noise-free content. If there is no clear title in your data, you can leave this parameter blank.
-
-`carrot.snippet`::
-The field (alternatively comma- or space-separated list of fields) that should be mapped to the logical document's main content. If this mapping points to very large content fields the performance of clustering may drop significantly. An alternative then is to use query-context snippets for clustering instead of full field content. See the description of the `carrot.produceSummary` parameter for details.
-
-`carrot.url`::
-The field that should be mapped to the logical document's content URL. Leave blank if not required.
-
-=== Clustering Multilingual Content
-
-The field mapping specification can include a `carrot.lang` parameter, which defines the field that stores the http://www.loc.gov/standards/iso639-2/php/code_list.php[ISO 639-1] code of the language in which the title and content of the document are written. This information can be stored in the index based on a priori knowledge of the documents' source, or on a language detection filter applied at indexing time. All algorithms inside the Carrot2 framework will accept ISO codes of languages defin [...]
-
-The language hint makes it easier for clustering algorithms to separate documents from different languages on input and to pick the right language resources for clustering. If you do have multi-lingual query results (or query results in a language different than English), it is strongly advised to map the language field appropriately.
-
-`carrot.lang`::
-The field that stores ISO 639-1 code of the language of the document's text fields.
-
-`carrot.lcmap`::
-A mapping of arbitrary strings into ISO 639 two-letter codes used by `carrot.lang`. The syntax of this parameter is the same as `langid.map.lcmap`, for example: `langid.map.lcmap=japanese:ja polish:pl english:en`
-
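A sketch of the corresponding mapping; the `language` field name is hypothetical (use whatever field stores the code in your schema), and the `carrot.lcmap` value repeats the syntax example above:

[source,xml]
----
<lst name="defaults">
  <!-- "language" is a hypothetical field holding the document's language code. -->
  <str name="carrot.lang">language</str>
  <!-- Map arbitrary stored values onto ISO 639-1 codes. -->
  <str name="carrot.lcmap">japanese:ja polish:pl english:en</str>
</lst>
----
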
-The default language can also be set using Carrot2-specific algorithm attributes (in this case the http://doc.carrot2.org/#section.attribute.lingo.MultilingualClustering.defaultLanguage[MultilingualClustering.defaultLanguage] attribute).
-
-== Tweaking Algorithm Settings
-
-The algorithms that come with Solr use their default settings, which may not be adequate for all data sets. All algorithms have lexical resources and other attributes (stop words, stemmers, parameters) that may require tweaking to get better clusters (and cluster labels). For Carrot2-based algorithms it is probably best to refer to a dedicated tuning application called Carrot2 Workbench (screenshot below). From this application one can export a set of algorithm attributes as an XML file, whi [...]
-
-image::images/result-clustering/carrot2-workbench.png[image,scaledwidth=75.0%]
-
-=== Providing Defaults for Clustering
-
-The default attributes for all engines (algorithms) declared in the clustering component are placed under `carrot.resourcesDir`, with an expected file name of `engineName-attributes.xml`. So for an engine named `lingo` and the default value of `carrot.resourcesDir`, the attributes would be read from a file in `conf/clustering/carrot2/lingo-attributes.xml`.
-
-An example XML file changing the default language of documents to Polish is shown below.
-
-[source,xml]
-----
-<attribute-sets default="attributes">
-  <attribute-set id="attributes">
-    <value-set>
-      <label>attributes</label>
-      <attribute key="MultilingualClustering.defaultLanguage">
-        <value type="org.carrot2.core.LanguageCode" value="POLISH"/>
-      </attribute>
-    </value-set>
-  </attribute-set>
-</attribute-sets>
-----
-
-=== Tweaking Algorithms at Query-Time
-
-The clustering component and Carrot2 clustering algorithms can accept query-time attribute overrides. Note that certain things (for example lexical resources) can only be initialized once (at startup, via the XML configuration files).
-
-An example query that changes the `LingoClusteringAlgorithm.desiredClusterCountBase` parameter for the Lingo algorithm:
-
-[source,text]
-http://localhost:8983/solr/techproducts/clustering?q=*:*&rows=100&LingoClusteringAlgorithm.desiredClusterCountBase=20
-
-The clustering engine (the algorithm declared in `solrconfig.xml`) can also be changed at runtime by passing the `clustering.engine=name` request parameter:
-
-[source,text]
-http://localhost:8983/solr/techproducts/clustering?q=*:*&rows=100&clustering.engine=kmeans
-
-== Performance Considerations with Dynamic Clustering
-
-Dynamic clustering of search results comes with two major performance penalties:
-
-* Increased cost of fetching a larger-than-usual number of search results (50, 100 or more documents),
-* Additional computational cost of the clustering itself.
-
-For simple queries, the clustering time will usually dominate the fetch time. If the document content is very long the retrieval of stored content can become a bottleneck. The performance impact of clustering can be lowered in several ways:
-
-* feed less content to the clustering algorithm by enabling `carrot.produceSummary` attribute,
-* perform clustering on selected fields (titles only) to make the input smaller,
-* use a faster algorithm (STC instead of Lingo, Lingo3G instead of STC),
-* tune the performance attributes related directly to a specific algorithm.
-
-Some of these techniques are described in _Apache SOLR and Carrot2 integration strategies_ document, available at http://carrot2.github.io/solr-integration-strategies. The topic of improving performance is also included in the Carrot2 manual at http://doc.carrot2.org/#section.advanced-topics.fine-tuning.performance.
-
-== Additional Resources
-
-The following resources provide additional information about the clustering component in Solr and its potential applications.
-
-* Apache Solr and Carrot2 integration strategies: http://carrot2.github.io/solr-integration-strategies
-* Clustering and Visualization of Solr search results (Berlin BuzzWords conference, 2011): http://2011.berlinbuzzwords.de/sites/2011.berlinbuzzwords.de/files/solr-clustering-visualization.pdf
diff --git a/solr/solr-ref-guide/src/searching.adoc b/solr/solr-ref-guide/src/searching.adoc
index cc84627..fe0fdf3 100644
--- a/solr/solr-ref-guide/src/searching.adoc
+++ b/solr/solr-ref-guide/src/searching.adoc
@@ -16,7 +16,6 @@
   pagination-of-results, +
   collapse-and-expand-results, +
   result-grouping, +
-  result-clustering, +
   spatial-search, +
   the-terms-component, +
   the-term-vector-component, +
@@ -75,7 +74,6 @@ This section describes how Solr works with search requests. It covers the follow
 * <<morelikethis.adoc#morelikethis,MoreLikeThis>>: Detailed information about Solr's similar results query component.
 * <<pagination-of-results.adoc#pagination-of-results,Pagination of Results>>: Detailed information about fetching paginated results for display in a UI, or for fetching all documents matching a query.
 * <<result-grouping.adoc#result-grouping,Result Grouping>>: Detailed information about grouping results based on common field values.
-* <<result-clustering.adoc#result-clustering,Result Clustering>>: Detailed information about grouping search results based on cluster analysis applied to text fields. A bit like "unsupervised" faceting.
 * <<spatial-search.adoc#spatial-search,Spatial Search>>: How to use Solr's spatial search capabilities.
 * <<the-terms-component.adoc#the-terms-component,The Terms Component>>: Detailed information about accessing indexed terms and the documents that include them.
 * <<the-term-vector-component.adoc#the-term-vector-component,The Term Vector Component>>: How to get term information about specific documents.
diff --git a/solr/solr-ref-guide/src/solr-upgrade-notes.adoc b/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
index 960b007..3703580 100644
--- a/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
+++ b/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
@@ -34,6 +34,14 @@ Detailed steps for upgrading a Solr cluster are in the section <<upgrading-a-sol
 
 If you are upgrading from 7.x, see the section <<Upgrading from 7.x Releases>> below.
 
+=== Solr 8.8
+
+*Removed Contribs*
+
+* The search results clustering contrib has been removed from the 8.x Solr line because
+  the dependency that provides on-line clustering of search results is no longer
+  compatible with Java 1.8. See SOLR-14981 for more details.
+
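As an illustration only (exact names vary per installation), configurations carried over from earlier 8.x releases would typically need to drop the clustering `<lib/>` directives plus declarations of this shape from `solrconfig.xml` before upgrading:

[source,xml]
----
<!-- Remove clustering declarations such as these; exact names may differ. -->
<searchComponent name="clustering" class="solr.clustering.ClusteringComponent"> ... </searchComponent>
<requestHandler name="/clustering" class="solr.SearchHandler"> ... </requestHandler>
----
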
 === Solr 8.7
 
 See the https://cwiki.apache.org/confluence/display/SOLR/ReleaseNote87[8.7 Release Notes^]
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/Cluster.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/Cluster.java
index 378e1a7..f5afa70 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/Cluster.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/Cluster.java
@@ -22,8 +22,6 @@ import java.util.Objects;
 
 /**
  * This class represents a cluster of Solr documents.
- * The cluster is produced from a set of Solr documents from the results.
- * It is a direct mapping for the Json object Solr is returning.
  */
 public class Cluster {
 
diff --git a/solr/solrj/src/test-files/solrj/sampleClusteringResponse.xml b/solr/solrj/src/test-files/solrj/sampleClusteringResponse.xml
deleted file mode 100644
index ea042c9..0000000
--- a/solr/solrj/src/test-files/solrj/sampleClusteringResponse.xml
+++ /dev/null
@@ -1,112 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-
-<response>
-  <lst name="responseHeader">
-    <int name="status">0</int>
-    <int name="QTime">10</int>
-    <lst name="params">
-      <str name="fl">id</str>
-      <str name="indent">true</str>
-      <str name="q">Test Query</str>
-      <str name="wt">xml</str>
-      <str name="hl">off</str>
-      <str name="clustering">true</str>
-      <str name="rows">5</str>
-    </lst>
-  </lst>
-  <result name="response" numFound="33" start="0">
-    <doc>
-      <str name="id">id1</str>
-    </doc>
-    <doc>
-      <str name="id">id2</str>
-    </doc>
-    <doc>
-      <str name="id">id3</str>
-    </doc>
-    <doc>
-      <str name="id">id4</str>
-    </doc>
-    <doc>
-      <str name="id">id5</str>
-    </doc>
-  </result>
-  <arr name="clusters">
-    <lst>
-      <arr name="labels">
-        <str>label1</str>
-      </arr>
-      <double name="score">0.6</double>
-      <arr name="docs">
-        <str>id1</str>
-        <str>id2</str>
-        <str>id3</str>
-      </arr>
-      <arr name="clusters">
-        <lst>
-          <arr name="labels">
-            <str>label1.sub1</str>
-          </arr>
-          <arr name="docs">
-            <str>id1</str>
-            <str>id2</str>
-          </arr>
-        </lst>      
-        <lst>
-          <arr name="labels">
-            <str>label1.sub2</str>
-          </arr>
-          <arr name="docs">
-            <str>id2</str>
-          </arr>
-        </lst>      
-      </arr>
-    </lst>
-    <lst>
-      <arr name="labels">
-        <str>label2</str>
-      </arr>
-      <double name="score">0.93</double>
-      <arr name="docs">
-        <str>id5</str>
-        <str>id6</str>
-      </arr>
-    </lst>
-    <lst>
-      <arr name="labels">
-        <str>label3</str>
-      </arr>
-      <double name="score">1.26</double>
-      <arr name="docs">
-        <str>id7</str>
-        <str>id8</str>
-      </arr>
-    </lst>
-    <lst>
-      <arr name="labels">
-        <str>label4</str>
-      </arr>
-      <double name="score">0.0</double>
-      <bool name="other-topics">true</bool>
-      <arr name="docs">
-        <str>id9</str>
-      </arr>
-    </lst>
-  </arr>
-</response>
\ No newline at end of file
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/response/TestClusteringResponse.java b/solr/solrj/src/test/org/apache/solr/client/solrj/response/TestClusteringResponse.java
deleted file mode 100644
index 75bb0d3..0000000
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/response/TestClusteringResponse.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.client.solrj.response;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.Reader;
-import java.nio.charset.StandardCharsets;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.solr.SolrJettyTestBase;
-import org.apache.solr.client.solrj.impl.XMLResponseParser;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrResourceLoader;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test for ClusteringComponent's response in Solrj
- */
-public class TestClusteringResponse extends SolrJettyTestBase {
-
-  @Test
-  public void testClusteringResponse() throws Exception {
-    XMLResponseParser parser = new XMLResponseParser();
-    NamedList<Object> response = null;
-
-    /*Load a simple XML with the clustering response encoded in an XML format*/
-    try (SolrResourceLoader loader = new SolrResourceLoader();
-         InputStream is = loader.openResource("solrj/sampleClusteringResponse.xml")) {
-      assertNotNull(is);
-      try (Reader in = new InputStreamReader(is, StandardCharsets.UTF_8)) {
-        response = parser.processResponse(in);
-      }
-    }
-    QueryResponse qr = new QueryResponse(response, null);
-    ClusteringResponse clusteringResponse = qr.getClusteringResponse();
-    List<Cluster> clusters = clusteringResponse.getClusters();
-    Assert.assertEquals(4, clusters.size());
-
-    checkCluster(clusters.get(0), Arrays.asList("label1"), Arrays.asList("id1", "id2", "id3"), 0.6d, false);
-    checkCluster(clusters.get(1), Arrays.asList("label2"), Arrays.asList("id5", "id6"), 0.93d, false);
-    checkCluster(clusters.get(2), Arrays.asList("label3"), Arrays.asList("id7", "id8"), 1.26d, false);
-    checkCluster(clusters.get(3), Arrays.asList("label4"), Arrays.asList("id9"), 0d, true);
-    
-    List<Cluster> sub = clusters.get(0).getSubclusters();
-    checkCluster(sub.get(0), Arrays.asList("label1.sub1"), Arrays.asList("id1", "id2"), 0.0d, false);
-    checkCluster(sub.get(1), Arrays.asList("label1.sub2"), Arrays.asList("id2"), 0.0d, false);
-    assertEquals(sub.size(), 2);
-  }
-
-  private void checkCluster(Cluster cluster, List<String> labels, List<String> docRefs, double score, boolean otherTopics) {
-    Assert.assertEquals(cluster.getLabels(), labels);
-    Assert.assertEquals(cluster.getDocs(), docRefs);
-    Assert.assertTrue(Double.compare(cluster.getScore(), score) == 0);
-    Assert.assertEquals(otherTopics, cluster.isOtherTopics());
-  }
-}
diff --git a/solr/solrj/src/test/org/apache/solr/common/cloud/TestZkMaintenanceUtils.java b/solr/solrj/src/test/org/apache/solr/common/cloud/TestZkMaintenanceUtils.java
index 3f6b6d7..661844a 100644
--- a/solr/solrj/src/test/org/apache/solr/common/cloud/TestZkMaintenanceUtils.java
+++ b/solr/solrj/src/test/org/apache/solr/common/cloud/TestZkMaintenanceUtils.java
@@ -28,8 +28,6 @@ import org.apache.commons.io.FileUtils;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.cloud.ZkTestServer;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkMaintenanceUtils;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.junit.AfterClass;
diff --git a/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java b/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java
index b9db14f..ac1c760 100644
--- a/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java
+++ b/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java
@@ -53,7 +53,6 @@ public class TestJavaBinCodec extends SolrTestCaseJ4 {
   private static final String BIN_FILE_LOCATION_CHILD_DOCS = "./solr/solrj/src/test-files/solrj/javabin_backcompat_child_docs.bin";
 
   private static final String SOLRJ_DOCS_1 = "/solrj/docs1.xml";
-  private static final String SOLRJ_DOCS_2 = "/solrj/sampleClusteringResponse.xml";
 
   public void testStrings() throws Exception {
     for (int i = 0; i < 10000 * RANDOM_MULTIPLIER; i++) {
@@ -300,15 +299,8 @@ public class TestJavaBinCodec extends SolrTestCaseJ4 {
     Map.Entry<Object, Object> entryFromTextDoc1 = getMapFromJavaBinCodec(SOLRJ_DOCS_1);
     Map.Entry<Object, Object> entryFromTextDoc1_clone = getMapFromJavaBinCodec(SOLRJ_DOCS_1);
 
-    Map.Entry<Object, Object> entryFromTextDoc2 = getMapFromJavaBinCodec(SOLRJ_DOCS_2);
-    Map.Entry<Object, Object> entryFromTextDoc2_clone = getMapFromJavaBinCodec(SOLRJ_DOCS_2);
-
     // exactly same document read twice should have same content
     assertEquals ("text-doc1 exactly same document read twice should have same content",entryFromTextDoc1,entryFromTextDoc1_clone);
-    // doc1 and doc2 are 2 text files with different content on line 1
-    assertNotEquals ("2 text streams with 2 different contents should be unequal",entryFromTextDoc2,entryFromTextDoc1);
-    // exactly same document read twice should have same content
-    assertEquals ("text-doc2 exactly same document read twice should have same content",entryFromTextDoc2,entryFromTextDoc2_clone);
   }
 
   @Test
diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
index a3f325b..a0831bc 100644
--- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
+++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
@@ -286,7 +286,6 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase {
     System.setProperty("jetty.testMode", "true");
     System.setProperty("enable.update.log", usually() ? "true" : "false");
     System.setProperty("tests.shardhandler.randomSeed", Long.toString(random().nextLong()));
-    System.setProperty("solr.clustering.enabled", "false");
     System.setProperty("solr.cloud.wait-for-updates-with-stale-state-pause", "500");
     System.setProperty(ZK_WHITELIST_PROPERTY, "*");
     startTrackingSearchers();